Commit d711a8e5 authored by Tamas K Lengyel, committed by Jan Beulich

x86/mem_sharing: make fork_reset more configurable

Allow specifying distinct parts of the fork VM to be reset. This is useful when
a fuzzing operation involves mapping in only a handful of pages that are known
ahead of time. Throwing these pages away just to be re-copied immediately is
expensive, so being able to request a partial reset can speed things up.

Also allow resetting to be initiated from vm_event responses as an
optimization.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
parent 54a71fe6
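
A minimal sketch of how a fuzzer's toolstack might use the new flags, assuming
the Xen public headers are on the include path. fork_reset() and
issue_memory_op() are hypothetical helpers (a real toolstack would go through
libxc/xencall); only the constants and the struct layout come from the public
header changed below.

    #include <stdint.h>
    #include <xen/xen.h>        /* domid_t */
    #include <xen/memory.h>     /* xen_mem_sharing_op_t, XENMEM_* */

    /* Hypothetical: deliver a XENMEM_sharing_op memop to Xen (e.g. via
     * libxc or xencall); returns 0 on success, negative errno otherwise. */
    int issue_memory_op(unsigned int cmd, void *arg);

    /* Reset only the parts of the fork selected by @flags. */
    static int fork_reset(domid_t domid, uint16_t flags)
    {
        xen_mem_sharing_op_t mso = {
            .op     = XENMEM_sharing_op_fork_reset,
            .domain = domid,
        };

        mso.u.fork.flags = flags;   /* XENMEM_FORK_RESET_STATE and/or _MEMORY */
        mso.u.fork.pad   = 0;       /* must be zero */

        return issue_memory_op(XENMEM_sharing_op, &mso);
    }

With a handful of pre-mapped pages that are re-fuzzed every iteration, the loop
can then skip the expensive memory reset and only reset the fork's state:

    rc = fork_reset(fork_domid, XENMEM_FORK_RESET_STATE);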
@@ -85,6 +85,9 @@ static inline bool mem_sharing_is_fork(const struct domain *d)
 int mem_sharing_fork_page(struct domain *d, gfn_t gfn,
                           bool unsharing);
 
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                           bool reset_memory);
+
 /*
  * If called by a foreign domain, possible errors are
  *   -EBUSY -> ring full
@@ -148,6 +151,12 @@ static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock)
     return -EOPNOTSUPP;
 }
 
+static inline int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                                         bool reset_memory)
+{
+    return -EOPNOTSUPP;
+}
+
 #endif
 
 #endif /* __MEM_SHARING_H__ */
@@ -1891,15 +1891,21 @@ static int fork(struct domain *cd, struct domain *d)
  * footprints the hypercall continuation should be implemented (or if this
  * feature needs to be become "stable").
  */
-static int mem_sharing_fork_reset(struct domain *d)
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                           bool reset_memory)
 {
-    int rc;
+    int rc = 0;
     struct domain *pd = d->parent;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     struct page_info *page, *tmp;
 
+    ASSERT(reset_state || reset_memory);
+
     domain_pause(d);
 
+    if ( !reset_memory )
+        goto state;
+
     /* need recursive lock because we will free pages */
     spin_lock_recursive(&d->page_alloc_lock);
     page_list_for_each_safe(page, tmp, &d->page_list)
@@ -1932,7 +1938,9 @@ static int mem_sharing_fork_reset(struct domain *d)
     }
     spin_unlock_recursive(&d->page_alloc_lock);
 
-    rc = copy_settings(d, pd);
+ state:
+    if ( reset_state )
+        rc = copy_settings(d, pd);
 
     domain_unpause(d);
@@ -2239,15 +2247,21 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
     case XENMEM_sharing_op_fork_reset:
     {
+        bool reset_state = mso.u.fork.flags & XENMEM_FORK_RESET_STATE;
+        bool reset_memory = mso.u.fork.flags & XENMEM_FORK_RESET_MEMORY;
+
         rc = -EINVAL;
-        if ( mso.u.fork.pad || mso.u.fork.flags )
+        if ( mso.u.fork.pad || (!reset_state && !reset_memory) )
+            goto out;
+        if ( mso.u.fork.flags &
+             ~(XENMEM_FORK_RESET_STATE | XENMEM_FORK_RESET_MEMORY) )
             goto out;
 
         rc = -ENOSYS;
         if ( !d->parent )
             goto out;
 
-        rc = mem_sharing_fork_reset(d);
+        rc = mem_sharing_fork_reset(d, reset_state, reset_memory);
         break;
     }
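Read together, the validation above gives callers of the (hypothetical)
fork_reset() helper sketched under the commit message a simple contract;
illustrative expectations only, with fork_domid/regular_domid as placeholders:

    rc = fork_reset(fork_domid, 0);                              /* no reset flag selected   -> EINVAL */
    rc = fork_reset(fork_domid, XENMEM_FORK_WITH_IOMMU_ALLOWED); /* flag not valid for reset -> EINVAL */
    rc = fork_reset(regular_domid, XENMEM_FORK_RESET_STATE);     /* domain is not a fork     -> ENOSYS */
    rc = fork_reset(fork_domid, XENMEM_FORK_RESET_STATE |
                                XENMEM_FORK_RESET_MEMORY);       /* full reset, as before    -> 0 */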
@@ -28,6 +28,11 @@
 #include <asm/p2m.h>
 #include <asm/monitor.h>
 #include <asm/vm_event.h>
+
+#ifdef CONFIG_MEM_SHARING
+#include <asm/mem_sharing.h>
+#endif
+
 #include <xsm/xsm.h>
 #include <public/hvm/params.h>
@@ -394,6 +399,17 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
             p2m_mem_paging_resume(d, &rsp);
 #endif
+#ifdef CONFIG_MEM_SHARING
+        if ( mem_sharing_is_fork(d) )
+        {
+            bool reset_state = rsp.flags & VM_EVENT_FLAG_RESET_FORK_STATE;
+            bool reset_mem = rsp.flags & VM_EVENT_FLAG_RESET_FORK_MEMORY;
+
+            if ( (reset_state || reset_mem) &&
+                 mem_sharing_fork_reset(d, reset_state, reset_mem) )
+                ASSERT_UNREACHABLE();
+        }
+#endif
 
         /*
          * Check emulation flags in the arch-specific handler only, as it
@@ -541,12 +541,14 @@ struct xen_mem_sharing_op {
                 uint32_t gref;     /* IN: gref to debug         */
             } u;
         } debug;
-        struct mem_sharing_op_fork {      /* OP_FORK */
+        struct mem_sharing_op_fork {      /* OP_FORK{,_RESET} */
             domid_t parent_domain;        /* IN: parent's domain id */
 /* Only makes sense for short-lived forks */
 #define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
 /* Only makes sense for short-lived forks */
 #define XENMEM_FORK_BLOCK_INTERRUPTS   (1u << 1)
+#define XENMEM_FORK_RESET_STATE        (1u << 2)
+#define XENMEM_FORK_RESET_MEMORY       (1u << 3)
             uint16_t flags;               /* IN: optional settings */
             uint32_t pad;                 /* Must be set to 0 */
         } fork;
@@ -127,6 +127,14 @@
  * Reset the vmtrace buffer (if vmtrace is enabled)
  */
 #define VM_EVENT_FLAG_RESET_VMTRACE      (1 << 13)
+/*
+ * Reset the VM state (if VM is fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_STATE   (1 << 14)
+/*
+ * Remove unshared entries from physmap (if VM is fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_MEMORY  (1 << 15)
 
 /*
  * Reasons for the vm event request
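On the monitoring side, a vm_event client can fold the reset into its event
response instead of issuing a separate fork-reset memop per iteration. A
minimal sketch, assuming req is the request just taken off the ring and
put_response() is the client's (hypothetical) routine for queueing a response:

    #include <xen/vm_event.h>   /* vm_event_response_t, VM_EVENT_* */

    vm_event_response_t rsp = {
        .version = VM_EVENT_INTERFACE_VERSION,
        .vcpu_id = req.vcpu_id,
        .reason  = req.reason,
        .flags   = VM_EVENT_FLAG_VCPU_PAUSED         /* unpause the vCPU on resume    */
                 | VM_EVENT_FLAG_RESET_FORK_STATE    /* re-copy state from the parent */
                 | VM_EVENT_FLAG_RESET_FORK_MEMORY,  /* drop unshared pages           */
    };

    put_response(&rsp);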