Commit d40029c8 authored by Paul Durrant, committed by Andrew Cooper

iommu: rename wrapper functions

A subsequent patch will add semantically different versions of
iommu_map/unmap() so, in advance of that change, this patch renames the
existing functions to iommu_legacy_map/unmap() and modifies all call-sites.
It also adjusts a comment that refers to iommu_map_page(), which was re-
named by a previous patch.

This patch is purely cosmetic. No functional change.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
parent cbe21fd0
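
For quick reference, the rename changes only the symbol names; argument lists and semantics are untouched. A condensed before/after view of the declarations, abridged from the xen/include/xen/iommu.h hunk at the end of this diff, looks like this:

    /* Before this patch */
    int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                               unsigned int page_order, unsigned int flags);
    int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
                                 unsigned int page_order);

    /* After this patch */
    int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                                      unsigned int page_order,
                                      unsigned int flags);
    int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
                                        unsigned int page_order);

Call sites change mechanically, e.g. iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K, IOMMUF_readable | IOMMUF_writable) becomes iommu_legacy_map(...) with identical arguments, as the hunks below show.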
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
@@ -2801,12 +2801,13 @@ static int _get_page_type(struct page_info *page, unsigned long type,
             mfn_t mfn = page_to_mfn(page);
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_unmap(d, _dfn(mfn_x(mfn)),
-                                        PAGE_ORDER_4K);
+                iommu_ret = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)),
+                                               PAGE_ORDER_4K);
             else if ( type == PGT_writable_page )
-                iommu_ret = iommu_map(d, _dfn(mfn_x(mfn)), mfn,
-                                      PAGE_ORDER_4K,
-                                      IOMMUF_readable | IOMMUF_writable);
+                iommu_ret = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn,
+                                             PAGE_ORDER_4K,
+                                             IOMMUF_readable |
+                                             IOMMUF_writable);
         }
     }
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
@@ -885,8 +885,8 @@ out:
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else if ( need_iommu_pt_sync(d) )
             rc = iommu_flags ?
-                 iommu_map(d, _dfn(gfn), mfn, order, iommu_flags) :
-                 iommu_unmap(d, _dfn(gfn), order);
+                 iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+                 iommu_legacy_unmap(d, _dfn(gfn), order);
     }
     unmap_domain_page(table);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
@@ -694,8 +694,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( need_iommu_pt_sync(p2m->domain) )
             rc = iommu_pte_flags ?
-                 iommu_map(d, _dfn(gfn), mfn, page_order, iommu_pte_flags) :
-                 iommu_unmap(d, _dfn(gfn), page_order);
+                 iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
+                                  iommu_pte_flags) :
+                 iommu_legacy_unmap(d, _dfn(gfn), page_order);
         else if ( iommu_use_hap_pt(d) && iommu_old_flags )
             amd_iommu_flush_pages(p2m->domain, gfn, page_order);
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
@@ -780,7 +780,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
     if ( !paging_mode_translate(p2m->domain) )
         return need_iommu_pt_sync(p2m->domain) ?
-               iommu_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
+               iommu_legacy_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
@@ -827,8 +827,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     if ( !paging_mode_translate(d) )
         return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
-               iommu_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
-                         IOMMUF_readable | IOMMUF_writable) : 0;
+               iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
+                                IOMMUF_readable | IOMMUF_writable) : 0;
     /* foreign pages are added thru p2m_add_foreign */
     if ( p2m_is_foreign(t) )
@@ -1302,8 +1302,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
-                         IOMMUF_readable | IOMMUF_writable);
+        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+                                IOMMUF_readable | IOMMUF_writable);
     }
     gfn_lock(p2m, gfn, 0);
@@ -1393,7 +1393,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
+        return iommu_legacy_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
     }
     gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
@@ -1436,15 +1436,16 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
          !need_iommu_pt_sync(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
-            if ( iommu_map(hardware_domain, _dfn(i), _mfn(i),
-                           PAGE_ORDER_4K,
-                           IOMMUF_readable | IOMMUF_writable) )
+            if ( iommu_legacy_map(hardware_domain, _dfn(i), _mfn(i),
+                                  PAGE_ORDER_4K,
+                                  IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_unmap(hardware_domain, _dfn(i), PAGE_ORDER_4K) )
+                if ( iommu_legacy_unmap(hardware_domain, _dfn(i),
+                                        PAGE_ORDER_4K) )
                     continue;
             goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
@@ -1134,14 +1134,14 @@ map_grant_ref(
              !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
         {
             if ( !(kind & MAPKIND_WRITE) )
-                err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
-                                IOMMUF_readable | IOMMUF_writable);
+                err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                                       IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
-                err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
-                                IOMMUF_readable);
+                err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                                       IOMMUF_readable);
         }
         if ( err )
         {
@@ -1389,10 +1389,10 @@ unmap_common(
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
+            err = iommu_legacy_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
         else if ( !(kind & MAPKIND_WRITE) )
-            err = iommu_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
-                            IOMMUF_readable);
+            err = iommu_legacy_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+                                   IOMMUF_readable);
         double_gt_unlock(lgt, rgt);
diff --git a/xen/common/memory.c b/xen/common/memory.c
@@ -853,11 +853,11 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
         this_cpu(iommu_dont_flush_iotlb) = 0;
-        ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), done);
+        ret = iommu_flush(d, _dfn(xatp->idx - done), done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
-        ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), done);
+        ret = iommu_flush(d, _dfn(xatp->gpfn - done), done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
     }
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
@@ -304,8 +304,8 @@ void iommu_domain_destroy(struct domain *d)
     arch_iommu_domain_destroy(d);
 }
-int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-              unsigned int page_order, unsigned int flags)
+int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+                     unsigned int page_order, unsigned int flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     unsigned long i;
@@ -345,7 +345,7 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
     return rc;
 }
-int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
+int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     unsigned long i;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
@@ -241,8 +241,8 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         if ( paging_mode_translate(d) )
            rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
         else
-            rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
-                           IOMMUF_readable | IOMMUF_writable);
+            rc = iommu_legacy_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+                                  IOMMUF_readable | IOMMUF_writable);
         if ( rc )
             printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
                    d->domain_id, rc);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
@@ -83,15 +83,21 @@ int iommu_construct(struct domain *d);
 /* Function used internally, use iommu_domain_destroy */
 void iommu_teardown(struct domain *d);
-/* iommu_map_page() takes flags to direct the mapping operation. */
+/*
+ * The following flags are passed to map operations and passed by lookup
+ * operations.
+ */
 #define _IOMMUF_readable 0
 #define IOMMUF_readable (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-                           unsigned int page_order, unsigned int flags);
-int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
-                             unsigned int page_order);
+int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+                                  unsigned int page_order,
+                                  unsigned int flags);
+int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
+                                    unsigned int page_order);
 int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);