Commit c771f275 authored by Paul Durrant, committed by Jan Beulich

iommu: make use of type-safe DFN and MFN in exported functions


This patch modifies the declaration of the entry points to the IOMMU
sub-system to use dfn_t and mfn_t in place of unsigned long. A subsequent
patch will similarly modify the methods in the iommu_ops structure.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Roger Pau Monne <roger.pau@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
parent cc6e309c
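For context before the diff: dfn_t is generated by Xen's TYPE_SAFE wrapper (the header hunk below shows TYPE_SAFE(uint64_t, dfn) and the new dfn_add() helper). In debug builds the wrapper boxes the raw integer in a distinct single-member struct, so handing a raw unsigned long, or an mfn_t, to a parameter declared dfn_t becomes a compile-time error. A minimal sketch of the pattern, paraphrased rather than quoted from xen/include/xen/typesafe.h:

    /* Sketch of the TYPE_SAFE pattern (paraphrase, not the verbatim header). */
    #ifndef NDEBUG
    /* Debug builds: a distinct struct type per name, plus boxing helpers. */
    #define TYPE_SAFE(_type, _name)                                      \
        typedef struct { _type _name; } _name##_t;                       \
        static inline _name##_t _##_name(_type n)                        \
            { return (_name##_t) { n }; }                                \
        static inline _type _name##_x(_name##_t n)                       \
            { return n._name; }
    #else
    /* Release builds: the wrapper compiles away to a plain typedef. */
    #define TYPE_SAFE(_type, _name)                                      \
        typedef _type _name##_t;                                         \
        static inline _name##_t _##_name(_type n) { return n; }          \
        static inline _type _name##_x(_name##_t n) { return n; }
    #endif

    TYPE_SAFE(uint64_t, dfn);   /* yields dfn_t, _dfn() and dfn_x() */

The struct form is what makes the transposed-argument bug class addressed below impossible to compile in debug builds.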
@@ -957,7 +957,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
     if ( need_iommu(p2m->domain) &&
          (lpae_is_valid(orig_pte) || lpae_is_valid(*entry)) )
-        rc = iommu_iotlb_flush(p2m->domain, gfn_x(sgfn), 1UL << page_order);
+        rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
+                               1UL << page_order);
     else
         rc = 0;
......
@@ -2789,14 +2789,14 @@ static int _get_page_type(struct page_info *page, unsigned long type,
         struct domain *d = page_get_owner(page);

         if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
         {
-            gfn_t gfn = _gfn(mfn_to_gmfn(d, mfn_x(page_to_mfn(page))));
+            mfn_t mfn = page_to_mfn(page);

             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_unmap_page(d, gfn_x(gfn));
+                iommu_ret = iommu_unmap_page(d, _dfn(mfn_x(mfn)));
             else if ( type == PGT_writable_page )
-                iommu_ret = iommu_map_page(d, gfn_x(gfn),
-                                           mfn_x(page_to_mfn(page)),
-                                           IOMMUF_readable|IOMMUF_writable);
+                iommu_ret = iommu_map_page(d, _dfn(mfn_x(mfn)), mfn,
+                                           IOMMUF_readable |
+                                           IOMMUF_writable);
         }
     }
......
@@ -881,15 +881,19 @@ out:
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else
         {
+            dfn_t dfn = _dfn(gfn);
+
             if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
                 {
-                    rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                    rc = iommu_map_page(d, dfn_add(dfn, i),
+                                        mfn_add(mfn, i), iommu_flags);
                     if ( unlikely(rc) )
                     {
                         while ( i-- )
                             /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain, gfn + i) )
+                            if ( iommu_unmap_page(p2m->domain,
+                                                  dfn_add(dfn, i)) )
                                 continue;

                         break;
@@ -898,7 +902,7 @@ out:
             else
                 for ( i = 0; i < (1 << order); i++ )
                 {
-                    ret = iommu_unmap_page(d, gfn + i);
+                    ret = iommu_unmap_page(d, dfn_add(dfn, i));
                     if ( !rc )
                         rc = ret;
                 }
......
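A side note on the recurring "/* If statement to satisfy __must_check. */" comment in the unwind loops above and below: iommu_unmap_page() is declared __must_check (GCC's warn_unused_result attribute in Xen's compiler.h), yet a failure during error unwind is deliberately ignored. Wrapping the call in an if-statement inspects, and then discards, the result without provoking the warning. A stand-alone illustration with a hypothetical op() in place of the real call:

    #define __must_check __attribute__((__warn_unused_result__))

    int __must_check op(unsigned long i)
    {
        return (int)(i & 1);        /* pretend odd indices fail */
    }

    void unwind(unsigned long n)
    {
        while ( n-- )
            /* If statement to satisfy __must_check: the return value
             * is tested (and deliberately ignored) rather than dropped. */
            if ( op(n) )
                continue;

        /* A bare op(n); here would trigger -Wunused-result. */
    }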
@@ -688,29 +688,36 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             if ( iommu_old_flags )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
         }
-        else if ( iommu_pte_flags )
-            for ( i = 0; i < (1UL << page_order); i++ )
-            {
-                rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
-                                    iommu_pte_flags);
-                if ( unlikely(rc) )
-                {
-                    while ( i-- )
-                        /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(p2m->domain, gfn + i) )
-                            continue;
+        else
+        {
+            dfn_t dfn = _dfn(gfn);
+
+            if ( iommu_pte_flags )
+                for ( i = 0; i < (1UL << page_order); i++ )
+                {
+                    rc = iommu_map_page(p2m->domain, dfn_add(dfn, i),
+                                        mfn_add(mfn, i), iommu_pte_flags);
+                    if ( unlikely(rc) )
+                    {
+                        while ( i-- )
+                            /* If statement to satisfy __must_check. */
+                            if ( iommu_unmap_page(p2m->domain,
+                                                  dfn_add(dfn, i)) )
+                                continue;

-                    break;
-                }
-            }
-        else
-            for ( i = 0; i < (1UL << page_order); i++ )
-            {
-                int ret = iommu_unmap_page(p2m->domain, gfn + i);
+                        break;
+                    }
+                }
+            else
+                for ( i = 0; i < (1UL << page_order); i++ )
+                {
+                    int ret = iommu_unmap_page(p2m->domain,
+                                               dfn_add(dfn, i));

-                if ( !rc )
-                    rc = ret;
-            }
+                    if ( !rc )
+                        rc = ret;
+                }
+        }
     }

     /*
......
@@ -723,9 +723,11 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
         if ( need_iommu(p2m->domain) )
         {
+            dfn_t dfn = _dfn(mfn);
+
             for ( i = 0; i < (1 << page_order); i++ )
             {
-                int ret = iommu_unmap_page(p2m->domain, mfn + i);
+                int ret = iommu_unmap_page(p2m->domain, dfn_add(dfn, i));

                 if ( !rc )
                     rc = ret;
@@ -782,16 +784,17 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     {
         if ( need_iommu(d) && t == p2m_ram_rw )
         {
+            dfn_t dfn = _dfn(mfn_x(mfn));
+
             for ( i = 0; i < (1 << page_order); i++ )
             {
-                rc = iommu_map_page(d, mfn_x(mfn_add(mfn, i)),
-                                    mfn_x(mfn_add(mfn, i)),
+                rc = iommu_map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
                                     IOMMUF_readable|IOMMUF_writable);
                 if ( rc != 0 )
                 {
                     while ( i-- > 0 )
                         /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(d, mfn_x(mfn_add(mfn, i))) )
+                        if ( iommu_unmap_page(d, dfn_add(dfn, i)) )
                             continue;

                     return rc;
@@ -1170,7 +1173,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
     {
         if ( !need_iommu(d) )
             return 0;
-        return iommu_map_page(d, gfn_l, gfn_l, IOMMUF_readable|IOMMUF_writable);
+        return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
+                              IOMMUF_readable | IOMMUF_writable);
     }

     gfn_lock(p2m, gfn, 0);
@@ -1260,7 +1264,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
     {
         if ( !need_iommu(d) )
             return 0;
-        return iommu_unmap_page(d, gfn_l);
+        return iommu_unmap_page(d, _dfn(gfn_l));
     }

     gfn_lock(p2m, gfn, 0);
......
@@ -1430,13 +1430,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
          !need_iommu(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
-            if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+            if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
+                                IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_unmap_page(hardware_domain, i) )
+                if ( iommu_unmap_page(hardware_domain, _dfn(i)) )
                     continue;

             goto destroy_m2p;
......
@@ -1143,13 +1143,13 @@ map_grant_ref(
              !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
         {
             if ( !(kind & MAPKIND_WRITE) )
-                err = iommu_map_page(ld, mfn_x(mfn), mfn_x(mfn),
-                                     IOMMUF_readable|IOMMUF_writable);
+                err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
+                                     IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
-                err = iommu_map_page(ld, mfn_x(mfn), mfn_x(mfn),
+                err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
                                      IOMMUF_readable);
         }
         if ( err )
@@ -1398,10 +1398,10 @@ unmap_common(
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_unmap_page(ld, mfn_x(op->mfn));
+            err = iommu_unmap_page(ld, _dfn(mfn_x(op->mfn)));
         else if ( !(kind & MAPKIND_WRITE) )
-            err = iommu_map_page(ld, mfn_x(op->mfn),
-                                 mfn_x(op->mfn), IOMMUF_readable);
+            err = iommu_map_page(ld, _dfn(mfn_x(op->mfn)), op->mfn,
+                                 IOMMUF_readable);

         double_gt_unlock(lgt, rgt);
......
@@ -835,11 +835,11 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
         this_cpu(iommu_dont_flush_iotlb) = 0;

-        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+        ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;

-        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+        ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), done);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
     }
......
@@ -285,7 +285,7 @@ void iommu_domain_destroy(struct domain *d)
     arch_iommu_domain_destroy(d);
 }

-int iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
+int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
                    unsigned int flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
@@ -294,13 +294,13 @@ int iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;

-    rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
+    rc = hd->platform_ops->map_page(d, dfn_x(dfn), mfn_x(mfn), flags);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
-                   "d%d: IOMMU mapping dfn %#lx to mfn %#lx failed: %d\n",
-                   d->domain_id, dfn, mfn, rc);
+                   "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
+                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);

         if ( !is_hardware_domain(d) )
             domain_crash(d);
@@ -309,7 +309,7 @@ int iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
     return rc;
 }

-int iommu_unmap_page(struct domain *d, unsigned long dfn)
+int iommu_unmap_page(struct domain *d, dfn_t dfn)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
@@ -317,13 +317,13 @@ int iommu_unmap_page(struct domain *d, unsigned long dfn)
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;

-    rc = hd->platform_ops->unmap_page(d, dfn);
+    rc = hd->platform_ops->unmap_page(d, dfn_x(dfn));
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
-                   "d%d: IOMMU unmapping dfn %#lx failed: %d\n",
-                   d->domain_id, dfn, rc);
+                   "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
+                   d->domain_id, dfn_x(dfn), rc);

         if ( !is_hardware_domain(d) )
             domain_crash(d);
@@ -349,8 +349,7 @@ static void iommu_free_pagetables(unsigned long unused)
                             cpumask_cycle(smp_processor_id(), &cpu_online_map));
 }

-int iommu_iotlb_flush(struct domain *d, unsigned long dfn,
-                      unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
@@ -358,13 +357,13 @@ int iommu_iotlb_flush(struct domain *d, unsigned long dfn,
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;

-    rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
+    rc = hd->platform_ops->iotlb_flush(d, dfn_x(dfn), page_count);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
-                   "d%d: IOMMU IOTLB flush failed: %d, dfn %#lx, page count %u\n",
-                   d->domain_id, rc, dfn, page_count);
+                   "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %u\n",
+                   d->domain_id, rc, dfn_x(dfn), page_count);

         if ( !is_hardware_domain(d) )
             domain_crash(d);
......
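The log messages above switch from %#lx to the PRI_dfn / PRI_mfn format macros. These follow the <inttypes.h> convention: each macro is a string literal containing just the conversion specifier, spliced into the format string by adjacent-literal concatenation. A small sketch of the mechanism; the PRI_dfn definition shown is an assumption for illustration (consistent with dfn being TYPE_SAFE(uint64_t, dfn)), not a quote of the Xen header:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PRI_dfn PRIx64   /* assumed: dfn_t wraps a uint64_t */

    int main(void)
    {
        uint64_t dfn = 0x1234;

        /* "dfn %" PRI_dfn "\n" concatenates to e.g. "dfn %lx\n". */
        printf("dfn %" PRI_dfn "\n", dfn);
        return 0;
    }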
@@ -60,4 +60,3 @@ void flush_all_cache()
 {
     wbinvd();
 }
-
@@ -241,7 +241,8 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         if ( paging_mode_translate(d) )
             rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
         else
-            rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
+            rc = iommu_map_page(d, _dfn(pfn), _mfn(pfn),
+                                IOMMUF_readable | IOMMUF_writable);
         if ( rc )
             printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
                    d->domain_id, rc);
......
@@ -24,6 +24,7 @@
 #include <xen/spinlock.h>
 #include <xen/pci.h>
 #include <xen/typesafe.h>
+#include <xen/mm.h>
 #include <public/hvm/ioreq.h>
 #include <public/domctl.h>
 #include <asm/device.h>
@@ -42,6 +43,11 @@ TYPE_SAFE(uint64_t, dfn);
 #undef dfn_x
 #endif

+static inline dfn_t dfn_add(dfn_t dfn, unsigned long i)
+{
+    return _dfn(dfn_x(dfn) + i);
+}
+
 extern bool_t iommu_enable, iommu_enabled;
 extern bool_t force_iommu, iommu_verbose;
 extern bool_t iommu_workaround_bios_bug, iommu_igfx;
@@ -78,9 +84,9 @@ void iommu_teardown(struct domain *d);
 #define IOMMUF_readable (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, unsigned long dfn,
-                                unsigned long mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, unsigned long dfn);
+int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
+                                mfn_t mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);

 enum iommu_feature
 {
@@ -203,7 +209,7 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
 int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t));

-int __must_check iommu_iotlb_flush(struct domain *d, unsigned long dfn,
+int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
                                    unsigned int page_count);
 int __must_check iommu_iotlb_flush_all(struct domain *d);
......
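Finally, a hypothetical caller showing the compile-time property the new signatures buy in debug builds, where dfn_t and mfn_t are distinct struct types (a sketch only; it assumes the declarations from the header hunk above):

    /* Hypothetical illustration, not part of the patch. */
    void example(struct domain *d, unsigned long gfn_l)
    {
        dfn_t dfn = _dfn(gfn_l);
        mfn_t mfn = _mfn(gfn_l);

        /* Arguments in the declared order: compiles fine. */
        if ( iommu_map_page(d, dfn, mfn, IOMMUF_readable | IOMMUF_writable) )
            /* If statement to satisfy __must_check. */;

        /*
         * iommu_map_page(d, mfn, dfn, IOMMUF_readable) would now be
         * rejected by the compiler (incompatible struct types), whereas
         * with the old unsigned long parameters the transposed call
         * compiled silently.
         */
    }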