Commit cf95b2a9 authored by Jean Guyader

iommu: Introduce per cpu flag (iommu_dont_flush_iotlb) to avoid unnecessary iotlb flush

Add a per-cpu flag that the low-level IOMMU code checks in order to skip IOTLB flushes; when the flag is set, iommu_iotlb_flush must be called explicitly by the caller.
Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
Committed-by: Keir Fraser <keir@xen.org>
parent e51e2e0e
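The batching pattern this flag enables is: set the per-cpu flag, perform the whole run of mappings (each of which would otherwise trigger its own IOTLB flush), clear the flag, then flush the affected range once. Below is a minimal caller-side sketch of that pattern, not code from the patch: the gfn/mfn/count variables and the loop are illustrative placeholders, and iommu_map_page(d, gfn, mfn, flags) is assumed to have the signature it had in Xen at the time.

    /* Illustrative sketch: batch many mappings, then flush the IOTLB once. */
    if ( need_iommu(d) )
        this_cpu(iommu_dont_flush_iotlb) = 1;   /* low-level code skips per-page flushes */

    for ( i = 0; i < count; i++ )               /* gfn, mfn, count are placeholders */
    {
        rc = iommu_map_page(d, gfn + i, mfn + i,
                            IOMMUF_readable | IOMMUF_writable);
        if ( rc )
            break;
    }

    if ( need_iommu(d) )
    {
        this_cpu(iommu_dont_flush_iotlb) = 0;   /* restore normal flushing */
        iommu_iotlb_flush(d, gfn, count);       /* one flush for the whole range */
    }

The xenmem_add_to_physmap change below applies exactly this shape, flushing both the source (idx) and destination (gpfn) ranges after the loop.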
@@ -4794,10 +4794,15 @@ static int xenmem_add_to_physmap_once(struct domain *d,
 static int xenmem_add_to_physmap(struct domain *d,
                                  struct xen_add_to_physmap *xatp)
 {
+    struct xen_add_to_physmap start_xatp;
     int rc = 0;
 
     if ( xatp->space == XENMAPSPACE_gmfn_range )
     {
+        if ( need_iommu(d) )
+            this_cpu(iommu_dont_flush_iotlb) = 1;
+
+        start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
             rc = xenmem_add_to_physmap_once(d, xatp);
@@ -4816,6 +4821,13 @@ static int xenmem_add_to_physmap(struct domain *d,
         }
     }
 
+    if ( need_iommu(d) )
+    {
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        iommu_iotlb_flush(d, start_xatp.idx, start_xatp.size - xatp->size);
+        iommu_iotlb_flush(d, start_xatp.gpfn, start_xatp.size - xatp->size);
+    }
+
     return rc;
 }
@@ -52,6 +52,8 @@ bool_t __read_mostly iommu_hap_pt_share = 1;
 bool_t __read_mostly iommu_debug;
 bool_t __read_mostly amd_iommu_perdev_intremap;
 
+DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+
 static void __init parse_iommu_param(char *s)
 {
     char *ss;
@@ -227,6 +229,7 @@ static int iommu_populate_page_table(struct domain *d)
     spin_lock(&d->page_alloc_lock);
 
+    this_cpu(iommu_dont_flush_iotlb) = 1;
     page_list_for_each ( page, &d->page_list )
     {
         if ( is_hvm_domain(d) ||
@@ -244,6 +247,8 @@ static int iommu_populate_page_table(struct domain *d)
             }
         }
     }
+    this_cpu(iommu_dont_flush_iotlb) = 0;
+    iommu_iotlb_flush_all(d);
     spin_unlock(&d->page_alloc_lock);
     return 0;
 }
@@ -663,7 +663,8 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     spin_unlock(&hd->mapping_lock);
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
-    __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
+    if ( !this_cpu(iommu_dont_flush_iotlb) )
+        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
 
     unmap_vtd_domain_page(page);
@@ -1760,7 +1761,8 @@ static int intel_iommu_map_page(
     spin_unlock(&hd->mapping_lock);
     unmap_vtd_domain_page(page);
 
-    __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+    if ( !this_cpu(iommu_dont_flush_iotlb) )
+        __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
 
     return 0;
 }
@@ -164,4 +164,16 @@ int iommu_do_domctl(struct xen_domctl *, XEN_GUEST_HANDLE(xen_domctl_t));
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
 void iommu_iotlb_flush_all(struct domain *d);
 
+/*
+ * The purpose of the optional per-cpu flag iommu_dont_flush_iotlb is to
+ * avoid unnecessary IOTLB flushes in the low-level IOMMU code.
+ *
+ * iommu_map_page/iommu_unmap_page must flush the IOTLB, but sometimes
+ * this operation can be really expensive. The caller sets this flag to
+ * tell the low-level IOMMU code to skip the IOTLB flushes;
+ * iommu_iotlb_flush/iommu_iotlb_flush_all must then be called explicitly
+ * by the caller.
+ */
+DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+
 #endif /* _IOMMU_H_ */
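On the driver side, honouring the flag is just a guard around the per-page flush, exactly as the VT-d hunks above do. A sketch of that shape for an arbitrary IOMMU driver's map/unmap path follows; driver_flush_iotlb_page() is a hypothetical driver-specific helper, not a Xen function.

    /* Per-page flush in a driver's map/unmap path, skipped while a caller batches. */
    if ( !this_cpu(iommu_dont_flush_iotlb) )
        driver_flush_iotlb_page(d, gfn);    /* hypothetical driver-specific flush */
    /* When skipped, the caller is responsible for iommu_iotlb_flush{,_all}(). */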