Commit 072268c1 authored by Paul Durrant, committed by Jan Beulich

iommu: introduce the concept of DFN...

...meaning 'device DMA frame number' i.e. a frame number mapped in the IOMMU
(rather than the MMU) and hence used for DMA address translation.

This patch is a largely cosmetic change that substitutes the terms 'dfn'
and 'daddr' for 'gfn' and 'gaddr' in all the places where the frame number
or address relates to a device rather than the CPU.

The parts that are not purely cosmetic are:

 - the introduction of a type-safe declaration of dfn_t and definition of
   INVALID_DFN to make the substitution of dfn_x(INVALID_DFN) for
   gfn_x(INVALID_GFN) mechanical.
 - the introduction of __dfn_to_daddr and __daddr_to_dfn (and type-safe
   variants without the leading __) with some use of the former (see the
   sketch below).
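
For illustration only (not part of the change itself), here is a minimal
standalone sketch of how the new conversion helpers behave; it mirrors the
definitions added to the x86 IOMMU header in the hunks below, while the
type-safe dfn_to_daddr()/daddr_to_dfn() wrappers are omitted:

    #include <stdint.h>
    #include <assert.h>

    #define IOMMU_PAGE_SHIFT 12                    /* IOMMU pages are 4k */

    typedef uint64_t daddr_t;                      /* device DMA address */

    #define __dfn_to_daddr(dfn)   ((daddr_t)(dfn) << IOMMU_PAGE_SHIFT)
    #define __daddr_to_dfn(daddr) ((daddr) >> IOMMU_PAGE_SHIFT)

    int main(void)
    {
        uint64_t dfn = 0x12345;                    /* device DMA frame number */
        daddr_t daddr = __dfn_to_daddr(dfn);       /* 0x12345000 */

        assert(daddr == 0x12345000ULL);
        assert(__daddr_to_dfn(daddr) == dfn);      /* round trip */

        return 0;
    }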

Subsequent patches will convert code to make use of type-safe DFNs.
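
For reference only: TYPE_SAFE(uint64_t, dfn), added in the hunk below that
also defines INVALID_DFN, roughly expands as sketched here, assuming
xen/typesafe.h follows its usual struct-wrapper pattern for debug builds
(release builds typically reduce to a plain typedef). The '#ifndef dfn_t'
block in that hunk exists only so that dfn_t, _dfn() and dfn_x() remain
greppable even though they are generated by token pasting:

    #include <stdint.h>

    /* Rough sketch of the debug-build expansion; not the verbatim macro. */
    typedef struct { uint64_t dfn; } dfn_t;

    static inline dfn_t _dfn(uint64_t n) { return (dfn_t){ n }; }
    static inline uint64_t dfn_x(dfn_t n) { return n.dfn; }

    /* Hence dfn_x(INVALID_DFN) == ~0ULL, the analogue of gfn_x(INVALID_GFN). */
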
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Julien Grall <julien.grall@arm.com>
Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
parent 8743d2de
......@@ -284,7 +284,7 @@ void invalidate_iommu_all(struct amd_iommu *iommu)
}
void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
uint64_t gaddr, unsigned int order)
daddr_t daddr, unsigned int order)
{
unsigned long flags;
struct amd_iommu *iommu;
......@@ -315,12 +315,12 @@ void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
/* send INVALIDATE_IOTLB_PAGES command */
spin_lock_irqsave(&iommu->lock, flags);
invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order);
invalidate_iotlb_pages(iommu, maxpend, 0, queueid, daddr, req_id, order);
flush_command_buffer(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr,
unsigned int order)
{
struct pci_dev *pdev;
......@@ -333,7 +333,7 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
u8 devfn = pdev->devfn;
do {
amd_iommu_flush_iotlb(devfn, pdev, gaddr, order);
amd_iommu_flush_iotlb(devfn, pdev, daddr, order);
devfn += pdev->phantom_stride;
} while ( devfn != pdev->devfn &&
PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );
......@@ -342,7 +342,7 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
/* Flush iommu cache after p2m changes. */
static void _amd_iommu_flush_pages(struct domain *d,
uint64_t gaddr, unsigned int order)
daddr_t daddr, unsigned int order)
{
unsigned long flags;
struct amd_iommu *iommu;
......@@ -352,13 +352,13 @@ static void _amd_iommu_flush_pages(struct domain *d,
for_each_amd_iommu ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
invalidate_iommu_pages(iommu, gaddr, dom_id, order);
invalidate_iommu_pages(iommu, daddr, dom_id, order);
flush_command_buffer(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
if ( ats_enabled )
amd_iommu_flush_all_iotlbs(d, gaddr, order);
amd_iommu_flush_all_iotlbs(d, daddr, order);
}
void amd_iommu_flush_all_pages(struct domain *d)
......@@ -367,9 +367,9 @@ void amd_iommu_flush_all_pages(struct domain *d)
}
void amd_iommu_flush_pages(struct domain *d,
unsigned long gfn, unsigned int order)
unsigned long dfn, unsigned int order)
{
_amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order);
_amd_iommu_flush_pages(d, __dfn_to_daddr(dfn), order);
}
void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
......
This diff is collapsed.
......@@ -548,7 +548,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
maddr_to_page(next_table_maddr), next_level,
address, indent + 1);
else
printk("%*sgfn: %08lx mfn: %08lx\n",
printk("%*sdfn: %08lx mfn: %08lx\n",
indent, "",
(unsigned long)PFN_DOWN(address),
(unsigned long)PFN_DOWN(next_table_maddr));
......
......@@ -2551,7 +2551,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
}
static int __must_check arm_smmu_iotlb_flush(struct domain *d,
unsigned long gfn,
unsigned long dfn,
unsigned int page_count)
{
/* ARM SMMU v1 doesn't have flush by VMA and VMID */
......@@ -2748,7 +2748,7 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
xfree(xen_domain);
}
static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
unsigned long mfn, unsigned int flags)
{
p2m_type_t t;
......@@ -2759,10 +2759,10 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
* protected by an IOMMU, Xen needs to add a 1:1 mapping in the domain
* p2m to allow DMA request to work.
* This is only valid when the domain is directed mapped. Hence this
* function should only be used by gnttab code with gfn == mfn.
* function should only be used by gnttab code with gfn == mfn == dfn.
*/
BUG_ON(!is_domain_direct_mapped(d));
BUG_ON(mfn != gfn);
BUG_ON(mfn != dfn);
/* We only support readable and writable flags */
if (!(flags & (IOMMUF_readable | IOMMUF_writable)))
......@@ -2774,19 +2774,19 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
* The function guest_physmap_add_entry replaces the current mapping
* if there is already one...
*/
return guest_physmap_add_entry(d, _gfn(gfn), _mfn(mfn), 0, t);
return guest_physmap_add_entry(d, _gfn(dfn), _mfn(dfn), 0, t);
}
static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long dfn)
{
/*
* This function should only be used by gnttab code when the domain
* is direct mapped
* is direct mapped (i.e. gfn == mfn == dfn).
*/
if ( !is_domain_direct_mapped(d) )
return -EINVAL;
return guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);
return guest_physmap_remove_page(d, _gfn(dfn), _mfn(dfn), 0);
}
static const struct iommu_ops arm_smmu_iommu_ops = {
......
......@@ -215,7 +215,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
page_list_for_each ( page, &d->page_list )
{
unsigned long mfn = mfn_x(page_to_mfn(page));
unsigned long gfn = mfn_to_gmfn(d, mfn);
unsigned long dfn = mfn_to_gmfn(d, mfn);
unsigned int mapping = IOMMUF_readable;
int ret;
......@@ -224,7 +224,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
ret = hd->platform_ops->map_page(d, dfn, mfn, mapping);
if ( !rc )
rc = ret;
......@@ -285,7 +285,7 @@ void iommu_domain_destroy(struct domain *d)
arch_iommu_domain_destroy(d);
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
int iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
......@@ -294,13 +294,13 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( !iommu_enabled || !hd->platform_ops )
return 0;
rc = hd->platform_ops->map_page(d, gfn, mfn, flags);
rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU mapping gfn %#lx to mfn %#lx failed: %d\n",
d->domain_id, gfn, mfn, rc);
"d%d: IOMMU mapping dfn %#lx to mfn %#lx failed: %d\n",
d->domain_id, dfn, mfn, rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
......@@ -309,7 +309,7 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
return rc;
}
int iommu_unmap_page(struct domain *d, unsigned long gfn)
int iommu_unmap_page(struct domain *d, unsigned long dfn)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
......@@ -317,13 +317,13 @@ int iommu_unmap_page(struct domain *d, unsigned long gfn)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
rc = hd->platform_ops->unmap_page(d, gfn);
rc = hd->platform_ops->unmap_page(d, dfn);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU unmapping gfn %#lx failed: %d\n",
d->domain_id, gfn, rc);
"d%d: IOMMU unmapping dfn %#lx failed: %d\n",
d->domain_id, dfn, rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
......@@ -349,7 +349,7 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(), &cpu_online_map));
}
int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
int iommu_iotlb_flush(struct domain *d, unsigned long dfn,
unsigned int page_count)
{
const struct domain_iommu *hd = dom_iommu(d);
......@@ -358,13 +358,13 @@ int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
rc = hd->platform_ops->iotlb_flush(d, gfn, page_count);
rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU IOTLB flush failed: %d, gfn %#lx, page count %u\n",
d->domain_id, rc, gfn, page_count);
"d%d: IOMMU IOTLB flush failed: %d, dfn %#lx, page count %u\n",
d->domain_id, rc, dfn, page_count);
if ( !is_hardware_domain(d) )
domain_crash(d);
......
......@@ -585,7 +585,7 @@ static int __must_check iommu_flush_all(void)
}
static int __must_check iommu_flush_iotlb(struct domain *d,
unsigned long gfn,
unsigned long dfn,
bool_t dma_old_pte_present,
unsigned int page_count)
{
......@@ -612,12 +612,12 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
if ( iommu_domid == -1 )
continue;
if ( page_count != 1 || gfn == gfn_x(INVALID_GFN) )
if ( page_count != 1 || dfn == dfn_x(INVALID_DFN) )
rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
0, flush_dev_iotlb);
else
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K,
__dfn_to_daddr(dfn),
PAGE_ORDER_4K,
!dma_old_pte_present,
flush_dev_iotlb);
......@@ -633,15 +633,15 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
}
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
unsigned long gfn,
unsigned long dfn,
unsigned int page_count)
{
return iommu_flush_iotlb(d, gfn, 1, page_count);
return iommu_flush_iotlb(d, dfn, 1, page_count);
}
static int __must_check iommu_flush_iotlb_all(struct domain *d)
{
return iommu_flush_iotlb(d, gfn_x(INVALID_GFN), 0, 0);
return iommu_flush_iotlb(d, dfn_x(INVALID_DFN), 0, 0);
}
/* clear one page's page table */
......@@ -1763,7 +1763,7 @@ static void iommu_domain_teardown(struct domain *d)
}
static int __must_check intel_iommu_map_page(struct domain *d,
unsigned long gfn,
unsigned long dfn,
unsigned long mfn,
unsigned int flags)
{
......@@ -1782,14 +1782,14 @@ static int __must_check intel_iommu_map_page(struct domain *d,
spin_lock(&hd->arch.mapping_lock);
pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
pg_maddr = addr_to_dma_page_maddr(d, __dfn_to_daddr(dfn), 1);
if ( pg_maddr == 0 )
{
spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
pte = page + (gfn & LEVEL_MASK);
pte = page + (dfn & LEVEL_MASK);
old = *pte;
dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
dma_set_pte_prot(new,
......@@ -1813,22 +1813,22 @@ static int __must_check intel_iommu_map_page(struct domain *d,
unmap_vtd_domain_page(page);
if ( !this_cpu(iommu_dont_flush_iotlb) )
rc = iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
rc = iommu_flush_iotlb(d, dfn, dma_pte_present(old), 1);
return rc;
}
static int __must_check intel_iommu_unmap_page(struct domain *d,
unsigned long gfn)
unsigned long dfn)
{
/* Do nothing if hardware domain and iommu supports pass thru. */
if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
return 0;
return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
return dma_pte_clear_one(d, __dfn_to_daddr(dfn));
}
int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
int order, int present)
{
struct acpi_drhd_unit *drhd;
......@@ -1852,7 +1852,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
continue;
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K,
__dfn_to_daddr(dfn),
order, !present, flush_dev_iotlb);
if ( rc > 0 )
{
......@@ -2622,7 +2622,7 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
address, indent + 1);
else
printk("%*sgfn: %08lx mfn: %08lx\n",
printk("%*sdfn: %08lx mfn: %08lx\n",
indent, "",
(unsigned long)(address >> PAGE_SHIFT_4K),
(unsigned long)(dma_pte_addr(*pte) >> PAGE_SHIFT_4K));
......
......@@ -30,6 +30,18 @@ struct g2m_ioport {
unsigned int np;
};
#define IOMMU_PAGE_SHIFT 12
#define IOMMU_PAGE_SIZE (1 << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))
typedef uint64_t daddr_t;
#define __dfn_to_daddr(dfn) ((daddr_t)(dfn) << IOMMU_PAGE_SHIFT)
#define __daddr_to_dfn(daddr) ((daddr) >> IOMMU_PAGE_SHIFT)
#define dfn_to_daddr(dfn) __dfn_to_daddr(dfn_x(dfn))
#define daddr_to_dfn(daddr) _dfn(__daddr_to_dfn(daddr))
struct arch_iommu
{
u64 pgd_maddr; /* io page directory machine address */
......
......@@ -23,11 +23,25 @@
#include <xen/page-defs.h>
#include <xen/spinlock.h>
#include <xen/pci.h>
#include <xen/typesafe.h>
#include <public/hvm/ioreq.h>
#include <public/domctl.h>
#include <asm/device.h>
#include <asm/iommu.h>
TYPE_SAFE(uint64_t, dfn);
#define PRI_dfn PRIx64
#define INVALID_DFN _dfn(~0ULL)
#ifndef dfn_t
#define dfn_t /* Grep fodder: dfn_t, _dfn() and dfn_x() are defined above */
#define _dfn
#define dfn_x
#undef dfn_t
#undef _dfn
#undef dfn_x
#endif
extern bool_t iommu_enable, iommu_enabled;
extern bool_t force_iommu, iommu_verbose;
extern bool_t iommu_workaround_bios_bug, iommu_igfx;
......@@ -64,9 +78,9 @@ void iommu_teardown(struct domain *d);
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
int __must_check iommu_map_page(struct domain *d, unsigned long gfn,
int __must_check iommu_map_page(struct domain *d, unsigned long dfn,
unsigned long mfn, unsigned int flags);
int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn);
int __must_check iommu_unmap_page(struct domain *d, unsigned long dfn);
enum iommu_feature
{
......@@ -154,9 +168,9 @@ struct iommu_ops {
#endif /* HAS_PCI */
void (*teardown)(struct domain *d);
int __must_check (*map_page)(struct domain *d, unsigned long gfn,
int __must_check (*map_page)(struct domain *d, unsigned long dfn,
unsigned long mfn, unsigned int flags);
int __must_check (*unmap_page)(struct domain *d, unsigned long gfn);
int __must_check (*unmap_page)(struct domain *d, unsigned long dfn);
void (*free_page_table)(struct page_info *);
#ifdef CONFIG_X86
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
......@@ -167,7 +181,7 @@ struct iommu_ops {
void (*resume)(void);
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
int __must_check (*iotlb_flush)(struct domain *d, unsigned long gfn,
int __must_check (*iotlb_flush)(struct domain *d, unsigned long dfn,
unsigned int page_count);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
......@@ -189,7 +203,7 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
int __must_check iommu_iotlb_flush(struct domain *d, unsigned long dfn,
unsigned int page_count);
int __must_check iommu_iotlb_flush_all(struct domain *d);
......
......@@ -26,6 +26,11 @@
* A linear idea of a guest physical address space. For an auto-translated
* guest, pfn == gfn while for a non-translated guest, pfn != gfn.
*
* dfn: Device DMA Frame Number (definitions in include/xen/iommu.h)
* The linear frame numbers of device DMA address space. All initiators for
* (i.e. all devices assigned to) a guest share a single DMA address space
* and, by default, Xen will ensure dfn == pfn.
*
* WARNING: Some of these terms have changed over time while others have been
* used inconsistently, meaning that a lot of existing code does not match the
* definitions above. New code should use these terms as described here, and
......