/*
 *   linux/mm/fremap.c
 * 
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

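/*
 * Clear one pte and release whatever it referenced: a present page is
 * flushed and unmapped with its rmap and RSS accounting updated, while a
 * non-present swap entry drops its swap reference.  Called with the pte
 * lock held.
 */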
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, file_rss);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

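/*
 * Install a file pte for every page in [addr, addr + size), starting at
 * file page offset pgoff.  Stops and returns the error on the first
 * failure, leaving any ptes installed so far in place.
 */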
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
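/*
 * Illustrative userspace sketch (assumptions: fd is an open file that
 * supports shared mmap; error handling omitted).  Remap the first page
 * of a four-page shared mapping so it shows file page 3 instead:
 *
 *	size_t psz = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	remap_file_pages(p, psz, 0, 3, 0);	// prot must be 0, pgoff in pages
 */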
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/*
	 * We need down_write() to change vma->vm_flags, so take the read
	 * lock first and upgrade to the write lock only if it turns out
	 * to be needed (see the retry below).
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.  vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = vma->vm_file;

			flags &= MAP_NONBLOCK;
			get_file(file);
			addr = mmap_region(file, start, size,
					flags, vma->vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop the PG_mlocked flag for the over-mapped range
		 */
		unsigned long saved_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = saved_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = populate_range(mm, vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (vma->vm_flags & VM_LOCKED) {
			/*
			 * might be mapping a previously unmapped range of the file
			 */
			mlock_vma_pages_range(vma, start, start + size);
		} else {
			if (unlikely(has_write_lock)) {
				downgrade_write(&mm->mmap_sem);
				has_write_lock = 0;
			}
			make_pages_present(start, start+size);
		}
	}

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}