Commit ad4312d7 authored by Jan Beulich

IOMMU/x86: disallow device assignment to PoD guests

While it is okay for IOMMU page tables to be set up for guests starting
in PoD mode, actual device assignment may only occur once all PoD
entries have been removed from the P2M. So far this was enforced only
for boot-time assignment, and only in the tool stack.
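As an illustration of the rule now enforced in the hypervisor, here is a minimal self-contained C sketch. All names in it are hypothetical toy stand-ins; the real logic lives in p2m_pod_active(), has_arch_pdevs()/cache_flush_permitted() and arch_iommu_use_permitted() in the diff below:

/* Toy model of the mutual exclusion between PoD and device assignment.
 * Hypothetical names only; not Xen code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_domain {
    long pod_entries;   /* outstanding PoD entries in the P2M */
    long pod_cache;     /* pages held in the PoD cache */
    int  assigned_devs; /* devices currently behind the IOMMU */
};

/* PoD counts as active while either entries or cache pages remain. */
static bool pod_active(const struct toy_domain *d)
{
    return d->pod_entries || d->pod_cache;
}

int main(void)
{
    struct toy_domain d = { .pod_entries = 32, .pod_cache = 4 };

    /* Device assignment is refused while PoD is active... */
    printf("assign device: %s\n", pod_active(&d) ? "-ENOTEMPTY" : "ok");

    /* ...and, once a device is assigned, creating new PoD entries or
     * growing the cache is refused in turn. */
    d.pod_entries = d.pod_cache = 0;
    d.assigned_devs = 1;
    printf("grow PoD state: %s\n", d.assigned_devs ? "-ENOTEMPTY" : "ok");

    return 0;
}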

Also use the new function, p2m_pod_active(), to replace
p2m_pod_entry_count(): Its unlocked
access to p2m->pod.entry_count wasn't really okay (irrespective of the
result being stale by the time the caller gets to see it). Nor was the
use of that function in line with the immediately preceding comment: A
PoD guest isn't just one with a non-zero entry count, but also one with
a non-empty cache (e.g. prior to actually launching the guest).
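The pattern adopted instead can be sketched as a small self-contained C program (a pthread mutex stands in for pod_lock(); none of this is Xen code): both counters are read under the same lock that writers hold, so callers observe one coherent (entries, cache) pair rather than a possibly torn one:

/* Sketch of a locked snapshot over two related counters. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pod_lock_mtx = PTHREAD_MUTEX_INITIALIZER;
static long entry_count = 32, cache_count = 4; /* stand-ins for p2m->pod fields */

/* Mirrors the approach of p2m_pod_get_mem_target()/p2m_pod_active():
 * read both fields under the writers' lock. */
static void pod_snapshot(long *entries, long *cache)
{
    pthread_mutex_lock(&pod_lock_mtx);
    *entries = entry_count;
    *cache = cache_count;
    pthread_mutex_unlock(&pod_lock_mtx);
}

int main(void)
{
    long e, c;

    pod_snapshot(&e, &c);
    printf("PoD active: %s (entries=%ld, cache=%ld)\n",
           (e || c) ? "yes" : "no", e, c);
    return 0;
}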

To allow the tool stack to see a consistent snapshot of PoD state, move
the tail of XENMEM_{get,set}_pod_target handling into a function, adding
proper locking there.
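On the consumer side, a hedged usage sketch: assuming the long-standing libxc wrapper xc_domain_get_pod_target() (which issues XENMEM_get_pod_target), all three reported values now come from one locked snapshot; the domid here is a placeholder:

/* Tool-stack view of the PoD snapshot via libxc. Compile against a Xen
 * tools tree, roughly: gcc pod.c -lxenctrl */
#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint64_t tot = 0, cache = 0, entries = 0;
    uint32_t domid = 1; /* hypothetical guest */

    if ( !xch )
        return 1;

    /* Wraps XENMEM_get_pod_target; the three fields are filled from a
     * single locked snapshot taken by p2m_pod_get_mem_target(). */
    if ( xc_domain_get_pod_target(xch, domid, &tot, &cache, &entries) == 0 )
        printf("tot=%" PRIu64 " cache=%" PRIu64 " entries=%" PRIu64 "\n",
               tot, cache, entries);

    xc_interface_close(xch);
    return 0;
}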

In libxl take the liberty to use the new local variable "r" also for a
pre-existing call into libxc.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
parent fe234237
@@ -701,6 +701,12 @@ int p2m_pod_empty_cache(struct domain *d);
  * domain matches target */
 int p2m_pod_set_mem_target(struct domain *d, unsigned long target);
 
+/* Obtain a consistent snapshot of PoD related domain state. */
+void p2m_pod_get_mem_target(const struct domain *d, xen_pod_target_t *target);
+
+/* Check whether PoD is (still) active in a domain. */
+bool p2m_pod_active(const struct domain *d);
+
 /* Scan pod cache when offline/broken page triggered */
 int
 p2m_pod_offline_or_broken_hit(struct page_info *p);
@@ -709,11 +715,6 @@ p2m_pod_offline_or_broken_hit(struct page_info *p);
 void
 p2m_pod_offline_or_broken_replace(struct page_info *p);
 
-static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
-{
-    return p2m->pod.entry_count;
-}
-
 #else
 static inline bool
@@ -727,6 +728,11 @@ static inline int p2m_pod_empty_cache(struct domain *d)
     return 0;
 }
 
+static inline bool p2m_pod_active(const struct domain *d)
+{
+    return false;
+}
+
 static inline int p2m_pod_offline_or_broken_hit(struct page_info *p)
 {
     return 0;
@@ -737,11 +743,6 @@ static inline void p2m_pod_offline_or_broken_replace(struct page_info *p)
     ASSERT_UNREACHABLE();
 }
 
-static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
-{
-    return 0;
-}
-
 #endif
@@ -4804,7 +4804,6 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     {
         xen_pod_target_t target;
         struct domain *d;
-        struct p2m_domain *p2m;
 
         if ( copy_from_guest(&target, arg, 1) )
             return -EFAULT;
@@ -4835,10 +4834,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
         else if ( rc >= 0 )
         {
-            p2m = p2m_get_hostp2m(d);
-            target.tot_pages       = domain_tot_pages(d);
-            target.pod_cache_pages = p2m->pod.count;
-            target.pod_entries     = p2m->pod.entry_count;
+            p2m_pod_get_mem_target(d, &target);
 
             if ( __copy_to_guest(arg, &target, 1) )
                 rc = -EFAULT;
@@ -20,6 +20,7 @@
  */
 
 #include <xen/event.h>
+#include <xen/iocap.h>
 #include <xen/ioreq.h>
 #include <xen/mm.h>
 #include <xen/sched.h>
@@ -360,7 +361,10 @@ p2m_pod_set_mem_target(struct domain *d, unsigned long target)
 
     ASSERT( pod_target >= p2m->pod.count );
 
-    ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
+    if ( has_arch_pdevs(d) || cache_flush_permitted(d) )
+        ret = -ENOTEMPTY;
+    else
+        ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
 
 out:
     pod_unlock(p2m);
@@ -368,6 +372,23 @@
     return ret;
 }
 
+void p2m_pod_get_mem_target(const struct domain *d, xen_pod_target_t *target)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    ASSERT(is_hvm_domain(d));
+
+    pod_lock(p2m);
+    lock_page_alloc(p2m);
+
+    target->tot_pages       = domain_tot_pages(d);
+    target->pod_cache_pages = p2m->pod.count;
+    target->pod_entries     = p2m->pod.entry_count;
+
+    unlock_page_alloc(p2m);
+    pod_unlock(p2m);
+}
+
 int p2m_pod_empty_cache(struct domain *d)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
@@ -1391,6 +1412,9 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
     if ( !paging_mode_translate(d) )
         return -EINVAL;
 
+    if ( has_arch_pdevs(d) || cache_flush_permitted(d) )
+        return -ENOTEMPTY;
+
     do {
         rc = mark_populate_on_demand(d, gfn, chunk_order);
@@ -1412,3 +1436,20 @@ void p2m_pod_init(struct p2m_domain *p2m)
     for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
         p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
 }
+
+bool p2m_pod_active(const struct domain *d)
+{
+    struct p2m_domain *p2m;
+    bool res;
+
+    if ( !is_hvm_domain(d) )
+        return false;
+
+    p2m = p2m_get_hostp2m(d);
+
+    pod_lock(p2m);
+    res = p2m->pod.entry_count | p2m->pod.count;
+    pod_unlock(p2m);
+
+    return res;
+}
@@ -655,7 +655,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
         rc = -EXDEV;
 
         /* Disallow paging in a PoD guest */
-        if ( p2m_pod_entry_count(p2m_get_hostp2m(d)) )
+        if ( p2m_pod_active(d) )
             break;
 
         /* domain_pause() not required here, see XSA-99 */
@@ -513,11 +513,12 @@ bool arch_iommu_use_permitted(const struct domain *d)
 {
     /*
      * Prevent device assign if mem paging, mem sharing or log-dirty
-     * have been enabled for this domain.
+     * have been enabled for this domain, or if PoD is still in active use.
      */
     return d == dom_io ||
            (likely(!mem_sharing_enabled(d)) &&
             likely(!mem_paging_enabled(d)) &&
+            likely(!p2m_pod_active(d)) &&
             likely(!p2m_is_global_logdirty(d)));
 }