Commit 725bf00a authored by Paul Durrant, committed by Jan Beulich

iommu / p2m: add a page_order parameter to iommu_map/unmap_page()...

...and re-name them to iommu_map/unmap() since they no longer necessarily
operate on a single page.

The P2M code currently contains many loops to deal with the fact that,
while it may be required to handle page orders greater than 0, the
IOMMU map and unmap functions do not.
This patch adds a page_order parameter to those functions and implements
the necessary loops within. This allows the P2M code to be substantially
simplified.
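
A minimal sketch of the effect at a typical call site (names as in the
diff below; error unwinding is omitted from the "before" form for
brevity):

    /* Before: the caller loops over each 4k page itself. */
    for ( i = 0; i < (1ul << page_order); i++ )
        rc = iommu_map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
                            IOMMUF_readable | IOMMUF_writable);

    /* After: a single call; iommu_map() performs the per-page loop
     * (and error unwinding) internally. */
    rc = iommu_map(d, dfn, mfn, page_order,
                   IOMMUF_readable | IOMMUF_writable);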

This patch also adds emacs boilerplate to xen/iommu.h to avoid tabbing
problems.

NOTE: This patch does not modify the underlying vendor IOMMU
      implementations to deal with more than a single page at once.
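
For example, assuming a suitably aligned 2M region and using
PAGE_ORDER_2M (order 9) purely as an illustration, a single call now
replaces 512 per-page calls at the call site, while the common code
still issues 512 order-0 operations to the vendor driver:

    /* Map one 2M-aligned, 2M-sized region with a single call... */
    rc = iommu_map(d, dfn, mfn, PAGE_ORDER_2M,
                   IOMMUF_readable | IOMMUF_writable);
    /*
     * ...iommu_map() turns this into 512 calls to the vendor's
     * ->map_page() hook, one per 4k page, unwinding on failure.
     */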
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
parent 29e28451
xen/arch/x86/mm.c
@@ -2801,11 +2801,12 @@ static int _get_page_type(struct page_info *page, unsigned long type,
             mfn_t mfn = page_to_mfn(page);
 
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_unmap_page(d, _dfn(mfn_x(mfn)));
+                iommu_ret = iommu_unmap(d, _dfn(mfn_x(mfn)),
+                                        PAGE_ORDER_4K);
             else if ( type == PGT_writable_page )
-                iommu_ret = iommu_map_page(d, _dfn(mfn_x(mfn)), mfn,
-                                           IOMMUF_readable |
-                                           IOMMUF_writable);
+                iommu_ret = iommu_map(d, _dfn(mfn_x(mfn)), mfn,
+                                      PAGE_ORDER_4K,
+                                      IOMMUF_readable | IOMMUF_writable);
         }
     }
xen/arch/x86/mm/p2m-ept.c
@@ -881,33 +881,9 @@ out:
         if ( iommu_use_hap_pt(d) )
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else if ( need_iommu_pt_sync(d) )
-        {
-            dfn_t dfn = _dfn(gfn);
-
-            if ( iommu_flags )
-                for ( i = 0; i < (1 << order); i++ )
-                {
-                    rc = iommu_map_page(d, dfn_add(dfn, i),
-                                        mfn_add(mfn, i), iommu_flags);
-                    if ( unlikely(rc) )
-                    {
-                        while ( i-- )
-                            /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain,
-                                                  dfn_add(dfn, i)) )
-                                continue;
-
-                        break;
-                    }
-                }
-            else
-                for ( i = 0; i < (1 << order); i++ )
-                {
-                    ret = iommu_unmap_page(d, dfn_add(dfn, i));
-                    if ( !rc )
-                        rc = ret;
-                }
-        }
+            rc = iommu_flags ?
+                 iommu_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+                 iommu_unmap(d, _dfn(gfn), order);
     }
 
     unmap_domain_page(table);
xen/arch/x86/mm/p2m-pt.c
@@ -477,10 +477,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
                  int sve)
 {
+    struct domain *d = p2m->domain;
     /* XXX -- this might be able to be faster iff current->domain == d */
     void *table;
     unsigned long gfn = gfn_x(gfn_);
-    unsigned long i, gfn_remainder = gfn;
+    unsigned long gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry, entry_content;
     /* Intermediate table to free if we're replacing it with a superpage. */
     l1_pgentry_t intermediate_entry = l1e_empty();
@@ -515,7 +516,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         t.gfn = gfn;
         t.mfn = mfn_x(mfn);
         t.p2mt = p2mt;
-        t.d = p2m->domain->domain_id;
+        t.d = d->domain_id;
         t.order = page_order;
 
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
@@ -683,41 +684,12 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     {
         ASSERT(rc == 0);
 
-        if ( iommu_use_hap_pt(p2m->domain) )
-        {
-            if ( iommu_old_flags )
-                amd_iommu_flush_pages(p2m->domain, gfn, page_order);
-        }
-        else if ( need_iommu_pt_sync(p2m->domain) )
-        {
-            dfn_t dfn = _dfn(gfn);
-
-            if ( iommu_pte_flags )
-                for ( i = 0; i < (1UL << page_order); i++ )
-                {
-                    rc = iommu_map_page(p2m->domain, dfn_add(dfn, i),
-                                        mfn_add(mfn, i), iommu_pte_flags);
-                    if ( unlikely(rc) )
-                    {
-                        while ( i-- )
-                            /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain,
-                                                  dfn_add(dfn, i)) )
-                                continue;
-
-                        break;
-                    }
-                }
-            else
-                for ( i = 0; i < (1UL << page_order); i++ )
-                {
-                    int ret = iommu_unmap_page(p2m->domain,
-                                               dfn_add(dfn, i));
-
-                    if ( !rc )
-                        rc = ret;
-                }
-        }
+        if ( need_iommu_pt_sync(p2m->domain) )
+            rc = iommu_pte_flags ?
+                iommu_map(d, _dfn(gfn), mfn, page_order, iommu_pte_flags) :
+                iommu_unmap(d, _dfn(gfn), page_order);
+        else if ( iommu_use_hap_pt(d) && iommu_old_flags )
+            amd_iommu_flush_pages(p2m->domain, gfn, page_order);
     }
 
     /*
xen/arch/x86/mm/p2m.c
@@ -710,24 +710,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
     p2m_access_t a;
 
     if ( !paging_mode_translate(p2m->domain) )
-    {
-        int rc = 0;
-
-        if ( need_iommu_pt_sync(p2m->domain) )
-        {
-            dfn_t dfn = _dfn(mfn);
-
-            for ( i = 0; i < (1 << page_order); i++ )
-            {
-                int ret = iommu_unmap_page(p2m->domain, dfn_add(dfn, i));
-
-                if ( !rc )
-                    rc = ret;
-            }
-        }
-
-        return rc;
-    }
+        return need_iommu_pt_sync(p2m->domain) ?
+            iommu_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
@@ -773,28 +757,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     int rc = 0;
 
     if ( !paging_mode_translate(d) )
-    {
-        if ( need_iommu_pt_sync(d) && t == p2m_ram_rw )
-        {
-            dfn_t dfn = _dfn(mfn_x(mfn));
-
-            for ( i = 0; i < (1 << page_order); i++ )
-            {
-                rc = iommu_map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
-                                    IOMMUF_readable|IOMMUF_writable);
-                if ( rc != 0 )
-                {
-                    while ( i-- > 0 )
-                        /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(d, dfn_add(dfn, i)) )
-                            continue;
-
-                    return rc;
-                }
-            }
-        }
-
-        return 0;
-    }
+        return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
+            iommu_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
+                      IOMMUF_readable | IOMMUF_writable) : 0;
 
     /* foreign pages are added thru p2m_add_foreign */
     if ( p2m_is_foreign(t) )
@@ -1164,8 +1129,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
-                              IOMMUF_readable | IOMMUF_writable);
+        return iommu_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+                         IOMMUF_readable | IOMMUF_writable);
     }
 
     gfn_lock(p2m, gfn, 0);
@@ -1255,7 +1220,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_unmap_page(d, _dfn(gfn_l));
+        return iommu_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
     }
 
     gfn_lock(p2m, gfn, 0);
xen/arch/x86/x86_64/mm.c
@@ -1436,14 +1436,15 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
          !need_iommu_pt_sync(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
-            if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
-                                IOMMUF_readable | IOMMUF_writable) )
+            if ( iommu_map(hardware_domain, _dfn(i), _mfn(i),
+                           PAGE_ORDER_4K,
+                           IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_unmap_page(hardware_domain, _dfn(i)) )
+                if ( iommu_unmap(hardware_domain, _dfn(i), PAGE_ORDER_4K) )
                     continue;
 
             goto destroy_m2p;
xen/common/grant_table.c
@@ -1134,14 +1134,14 @@ map_grant_ref(
              !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
         {
             if ( !(kind & MAPKIND_WRITE) )
-                err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
-                                     IOMMUF_readable | IOMMUF_writable);
+                err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                                IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
-                err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
-                                     IOMMUF_readable);
+                err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                                IOMMUF_readable);
         }
         if ( err )
         {
@@ -1389,10 +1389,10 @@ unmap_common(
 
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_unmap_page(ld, _dfn(mfn_x(op->mfn)));
+            err = iommu_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
         else if ( !(kind & MAPKIND_WRITE) )
-            err = iommu_map_page(ld, _dfn(mfn_x(op->mfn)), op->mfn,
-                                 IOMMUF_readable);
+            err = iommu_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+                            IOMMUF_readable);
 
         double_gt_unlock(lgt, rgt);
xen/drivers/passthrough/iommu.c
@@ -304,48 +304,78 @@ void iommu_domain_destroy(struct domain *d)
     arch_iommu_domain_destroy(d);
 }
 
-int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
-                   unsigned int flags)
+int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+              unsigned int page_order, unsigned int flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
-    int rc;
+    unsigned long i;
+    int rc = 0;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
-    if ( unlikely(rc) )
+    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
+    ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));
+
+    for ( i = 0; i < (1ul << page_order); i++ )
     {
+        rc = hd->platform_ops->map_page(d, dfn_add(dfn, i),
+                                        mfn_add(mfn, i), flags);
+
+        if ( likely(!rc) )
+            continue;
+
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
                    "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
+                   d->domain_id, dfn_x(dfn_add(dfn, i)),
+                   mfn_x(mfn_add(mfn, i)), rc);
+
+        while ( i-- )
+            /* if statement to satisfy __must_check */
+            if ( hd->platform_ops->unmap_page(d, dfn_add(dfn, i)) )
+                continue;
 
         if ( !is_hardware_domain(d) )
             domain_crash(d);
+
+        break;
     }
 
     return rc;
 }
 
-int iommu_unmap_page(struct domain *d, dfn_t dfn)
+int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
 {
     const struct domain_iommu *hd = dom_iommu(d);
-    int rc;
+    unsigned long i;
+    int rc = 0;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, dfn);
-    if ( unlikely(rc) )
+    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
+
+    for ( i = 0; i < (1ul << page_order); i++ )
     {
+        int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i));
+
+        if ( likely(!err) )
+            continue;
+
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
                    "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn), rc);
+                   d->domain_id, dfn_x(dfn_add(dfn, i)), err);
+
+        if ( !rc )
+            rc = err;
 
         if ( !is_hardware_domain(d) )
+        {
             domain_crash(d);
+            break;
+        }
     }
 
     return rc;
xen/drivers/passthrough/x86/iommu.c
@@ -241,8 +241,8 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         if ( paging_mode_translate(d) )
             rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
         else
-            rc = iommu_map_page(d, _dfn(pfn), _mfn(pfn),
-                                IOMMUF_readable | IOMMUF_writable);
+            rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+                           IOMMUF_readable | IOMMUF_writable);
         if ( rc )
            printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
                   d->domain_id, rc);
xen/include/xen/iommu.h
@@ -88,9 +88,10 @@ void iommu_teardown(struct domain *d);
 #define IOMMUF_readable  (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable  (1u<<_IOMMUF_writable)
 
-int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
-                                mfn_t mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
+int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+                           unsigned int page_order, unsigned int flags);
+int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+                             unsigned int page_order);
 int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);
@@ -268,3 +269,12 @@ extern struct spinlock iommu_pt_cleanup_lock;
 extern struct page_list_head iommu_pt_cleanup_list;
 
 #endif /* _IOMMU_H_ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */