/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = page;
			else
				/* lost the allocation race, free our copy */
				__free_page(page);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}
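/*
 * Everything below goes through the filesystem's get_xip_mem address
 * space operation to turn a page index into a kernel address and pfn
 * for the backing memory.  For reference, in this kernel generation the
 * hook in struct address_space_operations has this shape (parameter
 * names here are descriptive, not from the header):
 *
 *	int (*get_xip_mem)(struct address_space *, pgoff_t, int create,
 *			   void **kmem, unsigned long *pfn);
 *
 * A nonzero "create" asks the filesystem to allocate a backing block
 * for a hole; -ENODATA reports a sparse (unallocated) block.
 */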
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * access.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf, size_t len, loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;
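	/*
	 * Worked example (assuming the usual 4K PAGE_CACHE_SIZE): for
	 * pos = 5000 this yields index = 1 (5000 >> 12) and offset = 904
	 * (5000 & 4095), i.e. byte 904 within the second page.
	 */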
	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;
		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse block, read as zeroes */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;
		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}
		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
/*
 * __xip_unmap is invoked from xip_file_fault and __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}
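/*
 * Note on refcounting: every pte that maps __xip_sparse_page holds a
 * page reference taken via page_cache_get() in xip_file_fault() below;
 * the page_cache_release() above drops it as each pte is torn down.
 */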
/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;
	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return VM_FAULT_OOM;

		page_cache_get(page);
		vmf->page = page;
		return 0;
	}
}
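/*
 * vm_insert_mixed() in xip_file_fault() requires VM_MIXEDMAP on the
 * vma; xip_file_mmap() below sets it before installing these vm_ops.
 */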
static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;
		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* we allocate a new page and unmap it */
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);
		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
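/*
 * Example (a sketch, not part of this file): an XIP-capable filesystem
 * typically wires the exported helpers straight into its
 * file_operations, roughly as ext2 does when mounted with -o xip:
 *
 *	const struct file_operations ext2_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		...
 *	};
 */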
/*
 * Truncate a page used for execute in place.
 * The functionality is analogous to block_truncate_page(), but uses
 * get_xip_mem() to get at the page instead of the page cache.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
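/*
 * Usage sketch (caller side, an assumption about the filesystem, not
 * part of this file): the truncate path zeroes the tail of the last
 * block before trimming its block map, roughly:
 *
 *	if (mapping_is_xip(inode->i_mapping))
 *		xip_truncate_page(inode->i_mapping, inode->i_size);
 */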