1 /* drivers/android/pmem.c
3 * Copyright (C) 2007 Google, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
16 #include <linux/miscdevice.h>
17 #include <linux/platform_device.h>
19 #include <linux/file.h>
21 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/android_pmem.h>
24 #include <linux/mempolicy.h>
25 #include <linux/sched.h>
27 #include <asm/uaccess.h>
28 #include <asm/cacheflush.h>
/* Compile-time limits for the pmem driver. */
#define PMEM_MAX_DEVICES 10
#define PMEM_MAX_ORDER 128
#define PMEM_MIN_ALLOC PAGE_SIZE

/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED ( 0x1 << 1 )
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP ( 0x1 << 2 )
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP ( 0x1 << 3 )
#define PMEM_FLAGS_UNSUBMAP ( 0x1 << 4 )
/* NOTE(review): the lines below are the body of struct pmem_data (its
 * opening line is not visible in this view) — these are the per-open-file
 * fields referenced throughout the driver as data->sem, data->vma, etc.
 * Verify against the full source. */
/* in alloc mode: an index into the bitmap
 * in no_alloc mode: the size of the allocation */
/* see flags above for descriptions */
/* protects this data field, if the mm_mmap sem will be held at the
 * same time as this sem, the mm sem must be taken first (as this is
 * the order for vma_open and vma_close ops */
	struct rw_semaphore sem;
	/* info about the mmaping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	/* file descriptor of the master */
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;

/* One entry per PMEM_MIN_ALLOC-sized slot in the buddy bitmap
 * (fields of struct pmem_bits; the struct header is not visible here). */
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */

/* List node tying a pmem_region (offset/len pair) into a file's
 * region_list of suballocations. */
struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
/* Debug logging: when PMEM_DEBUG_MSGS is enabled DLOG prints with
 * file/function/line context, otherwise it compiles to nothing.
 * NOTE(review): the #if PMEM_DEBUG_MSGS / #else / #endif lines that select
 * between the two DLOG definitions are not visible in this view — confirm
 * against the full source. */
#define PMEM_DEBUG_MSGS 0
#define DLOG(fmt, args...) \
	do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
#define DLOG(x...) do {} while (0)
/* NOTE(review): the lines below are the body of struct pmem_info (its
 * opening line is not visible in this view) — one instance per pmem
 * device, stored in the pmem[] array below. */
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* index of the garbage page in the pmem space */
	/* the bitmap for the region indicating which entries are allocated
	 * and which are free */
	struct pmem_bits *bitmap;
	/* indicates the region should not be managed with an allocator */
	unsigned no_allocator;
	/* indicates maps of this region should be cached, if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	/* in no_allocator mode the first mapper gets the whole space and sets
	 * the "allocated" flag (see pmem_allocate/pmem_free) */
	/* for debugging, creates a list of pmem file structs, the
	 * data_list_sem should be taken before pmem_data->sem if both are
	 * held */
	struct semaphore data_list_sem;
	struct list_head data_list;
	/* pmem_sem protects the bitmap array
	 * a write lock should be held when modifying entries in bitmap
	 * a read lock should be held when reading data from bits or
	 * dereferencing a pointer into bitmap
	 *
	 * pmem_data->sem protects the pmem data of a particular file
	 * Many of the function that require the pmem_data->sem have a non-
	 * locking version for when the caller is already holding that sem.
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => down(bitmap_sem)
	 */
	struct rw_semaphore bitmap_sem;
	/* platform-supplied hooks, called from pmem_ioctl/pmem_release */
	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
152 static struct pmem_info pmem[PMEM_MAX_DEVICES];
155 #define PMEM_IS_FREE(id, index) ( !(pmem[id].bitmap[index].allocated) )
156 #define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
157 #define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
158 #define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
159 #define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
160 #define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
161 #define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
162 #define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
164 #define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase)
165 #define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
167 #define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
168 #define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
169 #define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
170 (!(data->flags & PMEM_FLAGS_UNSUBMAP)))
/* Forward declarations for the file_operations table below. */
static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);

/* fops installed on every /dev/pmem* misc device (see pmem_setup).
 * NOTE(review): .open = pmem_open and .mmap = pmem_mmap assignments are not
 * visible in this view even though both functions are declared above —
 * confirm against the full source. */
struct file_operations pmem_fops = {
	.release = pmem_release,
	.unlocked_ioctl = pmem_ioctl,
/* Return the pmem device id backing this open file: the misc-device minor
 * number, used to index the pmem[] array. */
static int get_id(struct file *file)
	return MINOR(file->f_dentry->d_inode->i_rdev);
/* Sanity-check that @file really is an open pmem device file: non-NULL
 * dentry/inode, minor within range, and the inode's device number matching
 * the registered misc device for that minor.
 * NOTE(review): the declaration of 'id' (presumably via get_id(file)) is not
 * visible in this view — confirm against the full source. */
static int is_pmem_file(struct file *file)
	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
	if (unlikely(id >= PMEM_MAX_DEVICES))
	if (unlikely(file->f_dentry->d_inode->i_rdev !=
		     MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
/* Return whether this pmem file currently owns an allocation: private_data
 * is set and data->index is a valid (non-negative) bitmap index. */
static int has_allocation(struct file *file)
	struct pmem_data *data;
	/* check is_pmem_file first if not accessed via pmem_file_ops */
	if (unlikely(!file->private_data))
	data = (struct pmem_data *)file->private_data;
	if (unlikely(data->index < 0))
/* Return non-zero if the caller may remap this file: either the file is the
 * master mapping itself, or the caller still holds the master fd recorded at
 * connect time (checked by re-resolving master_fd and comparing the file
 * pointers). */
static int is_master_owner(struct file *file)
	struct file *master_file;
	struct pmem_data *data;
	int put_needed, ret = 0;
	if (!is_pmem_file(file) || !has_allocation(file))
	data = (struct pmem_data *)file->private_data;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
	/* re-resolve the recorded master fd in the caller's fd table */
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
	fput_light(master_file, put_needed);
/* Free the allocation starting at bitmap @index, merging freed buddies back
 * into larger blocks.  In no_allocator mode the whole region is simply
 * marked unallocated. */
static int pmem_free(int id, int index)
	/* caller should hold the write lock on pmem_sem! */
	int buddy, curr = index;
	DLOG("index %d\n", index);
	if (pmem[id].no_allocator) {
		/* single whole-region allocation: just drop the flag */
		pmem[id].allocated = 0;
	/* clean up the bitmap, merging any buddies */
	pmem[id].bitmap[curr].allocated = 0;
	/* find a slots buddy Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
		buddy = PMEM_BUDDY_INDEX(id, curr);
		/* merge only equal-order free buddies, then retry one level up */
		if (PMEM_IS_FREE(id, buddy) &&
		    PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
			PMEM_ORDER(id, buddy)++;
			PMEM_ORDER(id, curr)++;
			curr = min(buddy, curr);
	} while (curr < pmem[id].num_entries);
static void pmem_revoke(struct file *file, struct pmem_data *data);

/* Release an open pmem file: revoke any submaps that used this file as
 * their master, drop it from the debug data_list, free its allocation
 * (unless it is a connected sub-file), drop the task reference taken at
 * submap time, free its region list, and finally call the platform
 * release hook if one was registered. */
static int pmem_release(struct inode *inode, struct file *file)
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;
	/* data_list_sem is taken before data->sem per the locking rules above */
	down(&pmem[id].data_list_sem);
	/* if this file is a master, revoke all the memory in the connected
	 * sub-files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		struct pmem_data *sub_data;
		list_for_each(elt, &pmem[id].data_list) {
			sub_data = list_entry(elt, struct pmem_data, list);
			down_read(&sub_data->sem);
			if (PMEM_IS_SUBMAP(sub_data) &&
			    file == sub_data->master_file) {
				/* drop the sem before revoking; pmem_revoke
				 * retakes it via pmem_lock_data_and_mm */
				up_read(&sub_data->sem);
				pmem_revoke(file, sub_data);
			up_read(&sub_data->sem);
	list_del(&data->list);
	up(&pmem[id].data_list_sem);
	down_write(&data->sem);
	/* if its not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		down_write(&pmem[id].bitmap_sem);
		ret = pmem_free(id, data->index);
		up_write(&pmem[id].bitmap_sem);
	/* if this file is a submap (mapped, connected file), downref the
	 * task that was referenced when it was mapped */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		put_task_struct(data->task);
	file->private_data = NULL;
	/* free any remaining region-list metadata */
	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
	BUG_ON(!list_empty(&data->region_list));
	up_write(&data->sem);
	/* give the board/platform code a chance to clean up too */
	if (pmem[id].release)
		ret = pmem[id].release(inode, file);
/* Allocate and initialise the per-open pmem_data for this file and link it
 * onto the device's debug data_list.  A given struct file may only be set
 * up once. */
static int pmem_open(struct inode *inode, struct file *file)
	struct pmem_data *data;
	int id = get_id(file);
	DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
	/* setup file->private_data to indicate its unmapped */
	/* you can only open a pmem device one time */
	if (file->private_data != NULL)
	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
	printk("pmem: unable to allocate memory for pmem metadata.");
	data->master_file = NULL;
	INIT_LIST_HEAD(&data->region_list);
	init_rwsem(&data->sem);
	file->private_data = data;
	INIT_LIST_HEAD(&data->list);
	/* publish the new data on the per-device debug list */
	down(&pmem[id].data_list_sem);
	list_add(&data->list, &pmem[id].data_list);
	up(&pmem[id].data_list_sem);
/* Convert a byte length into a buddy order: round @len up to a whole number
 * of PMEM_MIN_ALLOC slots, then find the smallest power of two that covers
 * it (the loop scans the bit positions of the slot count). */
static unsigned long pmem_order(unsigned long len)
	len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
	for (i = 0; i < sizeof(len)*8; i++)
/* Allocate @len bytes from device @id's buddy bitmap and return the starting
 * bitmap index (or the whole region in no_allocator mode).  Scans for an
 * exact-order free slot, falls back to the smallest larger free slot, then
 * splits that best fit down to the requested order. */
static int pmem_allocate(int id, unsigned long len)
	/* caller should hold the write lock on pmem_sem! */
	/* return the corresponding pdata[] entry */
	int end = pmem[id].num_entries;
	unsigned long order = pmem_order(len);
	if (pmem[id].no_allocator) {
		DLOG("no allocator");
		/* only one whole-region allocation is permitted */
		if ((len > pmem[id].size) || pmem[id].allocated)
		pmem[id].allocated = 1;
	if (order > PMEM_MAX_ORDER)
	DLOG("order %lx\n", order);
	/* look through the bitmap:
	 * if you find a free slot of the correct order use it
	 * otherwise, use the best fit (smallest with size > order) slot
	 */
		if (PMEM_IS_FREE(id, curr)) {
			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
				/* set the not free bit and clear others */
			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
			    PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
		curr = PMEM_NEXT_INDEX(id, curr);
	/* if best_fit < 0, there are no suitable slots,
	 * return an error */
		printk("pmem: no space left to allocate!\n");
	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
		PMEM_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
	pmem[id].bitmap[best_fit].allocated = 1;
/* Pick the page protection for a pmem mapping: uncached when the device is
 * configured uncached or the file was opened O_SYNC; otherwise buffered if
 * the arch provides pgprot_ext_buffered and the device asked for it. */
static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
	int id = get_id(file);
#ifdef pgprot_noncached
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		return pgprot_noncached(vma_prot);
#ifdef pgprot_ext_buffered
	else if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
/* Physical start address of this file's allocation (the region base in
 * no_allocator mode, else the address of its bitmap index). */
static unsigned long pmem_start_addr(int id, struct pmem_data *data)
	if (pmem[id].no_allocator)
		return PMEM_START_ADDR(id, 0);
	return PMEM_START_ADDR(id, data->index);
/* Kernel virtual address of this file's allocation, derived by offsetting
 * the physical start into the ioremapped vbase. */
static void *pmem_start_vaddr(int id, struct pmem_data *data)
	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
/* Byte length of this file's allocation.  In no_allocator mode the length
 * is stored in the data itself (see the struct comment: "in no_alloc mode:
 * the size of the allocation"); the return for that branch is not visible
 * in this view. */
static unsigned long pmem_len(int id, struct pmem_data *data)
	if (pmem[id].no_allocator)
	return PMEM_LEN(id, data->index);
/* Point @len bytes of the vma starting at @offset at the device's single
 * "garbage" page, page by page, so revoked/forked mappings cannot see real
 * pmem data. */
static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
	int i, garbage_pages = len >> PAGE_SHIFT;
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
		    pmem[id].garbage_pfn))
/* Tear down @len bytes of real pmem mapping at @offset within the vma and
 * replace them with garbage-page mappings (so the address range stays valid
 * but no longer exposes pmem contents). */
static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
	DLOG("unmap offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	garbage_pages = len >> PAGE_SHIFT;
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	pmem_map_garbage(id, vma, data, offset, len);
/* Map @len bytes of the file's pmem allocation, starting at @offset, into
 * the vma with io_remap_pfn_range.  All boundaries must be page aligned. */
static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
	if (io_remap_pfn_range(vma, vma->vm_start + offset,
			       (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
			       len, vma->vm_page_prot)) {
/* Re-establish a real pmem mapping over a range that may currently be
 * garbage-mapped: zap whatever is there, then map the pmem pfns. */
static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
	/* hold the mm semp for the vma you are modifying when you call this */
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
535 static void pmem_vma_open(struct vm_area_struct *vma)
537 struct file *file = vma->vm_file;
538 struct pmem_data *data = file->private_data;
539 int id = get_id(file);
540 /* this should never be called as we don't support copying pmem
542 BUG_ON(!has_allocation(file));
543 down_write(&data->sem);
544 /* remap the garbage pages, forkers don't get access to the data */
545 pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end);
546 up_write(&data->sem);
/* vm_ops.close: the mapping is going away.  For a connected, submapped file
 * raise UNSUBMAP so later code knows the vma/mm reference is no longer
 * usable (see the submap/unsubmap state table at the top of the file). */
static void pmem_vma_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;
	DLOG("current %u ppid %u file %p count %d\n", current->pid,
	     current->parent->pid, file, file_count(file));
	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
		printk(KERN_WARNING "pmem: something is very wrong, you are "
		       "closing a vm backing an allocation that doesn't "
	down_write(&data->sem);
	/* only act if this vma is the one we recorded at mmap time */
	if (data->vma == vma) {
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
/* vma callbacks installed on every pmem mapping (see pmem_mmap). */
static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
/* mmap handler: validates the request (offset 0, page-aligned size, not
 * already mapped), allocates backing space on first map, then either builds
 * a submap (connected file: garbage-map everything, then map only the
 * regions in region_list) or a master map of the whole allocation. */
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
	struct pmem_data *data;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);
	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
		       " and a multiple of pages_size.\n");
	data = (struct pmem_data *)file->private_data;
	down_write(&data->sem);
	/* check this file isn't already mmaped, for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
	    (data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
		       "this file is already mmaped. %x\n", data->flags);
	/* if file->private_data == unalloced, alloc*/
	if (data && data->index == -1) {
		down_write(&pmem[id].bitmap_sem);
		/* NOTE(review): 'index' has no visible declaration here and the
		 * assignment of it into data->index is not visible either —
		 * confirm against the full source. */
		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
		up_write(&pmem[id].bitmap_sem);
	/* either no space was available or an error occured */
	if (!has_allocation(file)) {
		printk("pmem: could not find allocation for map.\n");
	if (pmem_len(id, data) < vma_size) {
		printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
		       "size of backing region [%lu].\n", vma_size,
	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);
	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		/* default everything to the garbage page, then punch in the
		 * regions this sub-file is actually entitled to */
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			printk("pmem: mmap failed in kernel!\n");
		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
			DLOG("remapping file: %p %lx %lx\n", file,
			     region_node->region.offset,
			     region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
						 region_node->region.offset,
						 region_node->region.len)) {
		/* record the mapping task; the reference is dropped in
		 * pmem_release/pmem_lock_data_and_mm */
		data->flags |= PMEM_FLAGS_SUBMAP;
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->pid = current->pid;
		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
	if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
		printk(KERN_INFO "pmem: mmap failed in kernel!\n");
	data->flags |= PMEM_FLAGS_MASTERMAP;
	data->pid = current->pid;
	vma->vm_ops = &vm_ops;
	up_write(&data->sem);
/* the following are the api for accessing pmem regions by other drivers
 * from inside the kernel */

/* Report the userspace address range of this file's mapping via
 * @start/@len.
 * NOTE(review): data->vma is dereferenced here but a NULL check is not
 * visible in this view (data->vma may be unset if the file was never
 * mmaped) — confirm against the full source. */
int get_pmem_user_addr(struct file *file, unsigned long *start,
	struct pmem_data *data;
	if (!is_pmem_file(file) || !has_allocation(file)) {
		printk(KERN_INFO "pmem: requested pmem data from invalid"
	data = (struct pmem_data *)file->private_data;
	down_read(&data->sem);
	*start = data->vma->vm_start;
	*len = data->vma->vm_end - data->vma->vm_start;
/* Report the physical start, kernel virtual start and length of this file's
 * allocation for in-kernel users.
 * NOTE(review): between the trailing down_write/up_write pair the visible
 * code shows no statement — presumably a reference-count increment
 * (put_pmem_file below checks data->ref); confirm against the full
 * source. */
int get_pmem_addr(struct file *file, unsigned long *start,
		  unsigned long *vstart, unsigned long *len)
	struct pmem_data *data;
	if (!is_pmem_file(file) || !has_allocation(file))
	data = (struct pmem_data *)file->private_data;
	if (data->index == -1) {
		printk(KERN_INFO "pmem: requested pmem data from file with no "
	down_read(&data->sem);
	*start = pmem_start_addr(id, data);
	*len = pmem_len(id, data);
	*vstart = (unsigned long)pmem_start_vaddr(id, data);
	down_write(&data->sem);
	up_write(&data->sem);
/* Kernel API: resolve @fd to a pmem file, return its physical/virtual
 * address and length, and hand back the file pointer via @filp.  Callers
 * must balance with put_pmem_file (see the PMEM_FLAGS_BUSY comment at the
 * top of the file). */
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
	if (unlikely(file == NULL)) {
		printk(KERN_INFO "pmem: requested data from file descriptor "
		       "that doesn't exist.");
	if (get_pmem_addr(file, start, vstart, len))
/* Kernel API: release a reference taken by get_pmem_file.  Warns if puts
 * outnumber gets (data->ref already zero). */
void put_pmem_file(struct file *file)
	struct pmem_data *data;
	if (!is_pmem_file(file))
	data = (struct pmem_data *)file->private_data;
	down_write(&data->sem);
	if (data->ref == 0) {
		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
		       pmem[id].dev.name, data->pid);
	up_write(&data->sem);
/* Kernel API: flush CPU caches for (part of) a pmem file.  No-op for
 * uncached devices.  For a plain (non-connected) file the whole allocation
 * is flushed; for a connected sub-file only the region node that fully
 * contains [offset, offset+len) is flushed. */
void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
	struct pmem_data *data;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;
	if (!is_pmem_file(file) || !has_allocation(file))
	data = (struct pmem_data *)file->private_data;
	if (!pmem[id].cached)
	down_read(&data->sem);
	vaddr = pmem_start_vaddr(id, data);
	/* if this isn't a submmapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
/* PMEM_CONNECT ioctl backend: attach this file to the allocation of the
 * pmem file behind fd @connect, recording the master fd/file and marking
 * this file CONNECTED so it becomes a sub-allocation of the master range. */
static int pmem_connect(unsigned long connect, struct file *file)
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_data *src_data;
	struct file *src_file;
	int ret = 0, put_needed;
	down_write(&data->sem);
	/* retrieve the src file and check it is a pmem file with an alloc */
	src_file = fget_light(connect, &put_needed);
	DLOG("connect %p to %p\n", file, src_file);
	printk(KERN_INFO "pmem: src file not found!\n");
	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
	src_data = (struct pmem_data *)src_file->private_data;
	/* reconnecting to a different allocation is an error */
	if (has_allocation(file) && (data->index != src_data->index)) {
		printk(KERN_INFO "pmem: file is already mapped but doesn't match this"
	data->index = src_data->index;
	data->flags |= PMEM_FLAGS_CONNECTED;
	data->master_fd = connect;
	data->master_file = src_file;
	fput_light(src_file, put_needed);
	up_write(&data->sem);
/* Release the locks taken by pmem_lock_data_and_mm: the file's data sem and,
 * when an mm was locked, its mmap_sem.
 * NOTE(review): the guard around the mm release (an "if (mm != NULL)" plus a
 * matching mmput) is not visible in this view — confirm against the full
 * source. */
static void pmem_unlock_data_and_mm(struct pmem_data *data,
				    struct mm_struct *mm)
	up_write(&data->sem);
	up_write(&mm->mmap_sem);
874 static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
875 struct mm_struct **locked_mm)
878 struct mm_struct *mm = NULL;
881 down_read(&data->sem);
882 if (PMEM_IS_SUBMAP(data)) {
883 mm = get_task_mm(data->task);
886 printk(KERN_DEBUG "pmem: can't remap task is gone!\n");
895 down_write(&mm->mmap_sem);
897 down_write(&data->sem);
898 /* check that the file didn't get mmaped before we could take the
899 * data sem, this should be safe b/c you can only submap each file
901 if (PMEM_IS_SUBMAP(data) && !mm) {
902 pmem_unlock_data_and_mm(data, mm);
903 up_write(&data->sem);
906 /* now check that vma.mm is still there, it could have been
907 * deleted by vma_close before we could get the data->sem */
908 if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
909 /* might as well release this */
910 if (data->flags & PMEM_FLAGS_SUBMAP) {
911 put_task_struct(data->task);
913 /* lower the submap flag to show the mm is gone */
914 data->flags &= ~(PMEM_FLAGS_SUBMAP);
916 pmem_unlock_data_and_mm(data, mm);
923 int pmem_remap(struct pmem_region *region, struct file *file,
927 struct pmem_region_node *region_node;
928 struct mm_struct *mm = NULL;
929 struct list_head *elt, *elt2;
930 int id = get_id(file);
931 struct pmem_data *data = (struct pmem_data *)file->private_data;
933 /* pmem region must be aligned on a page boundry */
934 if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
935 !PMEM_IS_PAGE_ALIGNED(region->len))) {
937 printk(KERN_DEBUG "pmem: request for unaligned pmem suballocation "
938 "%lx %lx\n", region->offset, region->len);
943 /* if userspace requests a region of len 0, there's nothing to do */
944 if (region->len == 0)
947 /* lock the mm and data */
948 ret = pmem_lock_data_and_mm(file, data, &mm);
952 /* only the owner of the master file can remap the client fds
954 if (!is_master_owner(file)) {
956 printk("pmem: remap requested from non-master process\n");
962 /* check that the requested range is within the src allocation */
963 if (unlikely((region->offset > pmem_len(id, data)) ||
964 (region->len > pmem_len(id, data)) ||
965 (region->offset + region->len > pmem_len(id, data)))) {
967 printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
973 if (operation == PMEM_MAP) {
974 region_node = kmalloc(sizeof(struct pmem_region_node),
979 printk(KERN_INFO "No space to allocate metadata!");
983 region_node->region = *region;
984 list_add(®ion_node->list, &data->region_list);
985 } else if (operation == PMEM_UNMAP) {
987 list_for_each_safe(elt, elt2, &data->region_list) {
988 region_node = list_entry(elt, struct pmem_region_node,
990 if (region->len == 0 ||
991 (region_node->region.offset == region->offset &&
992 region_node->region.len == region->len)) {
1000 printk("pmem: Unmap region does not map any mapped "
1008 if (data->vma && PMEM_IS_SUBMAP(data)) {
1009 if (operation == PMEM_MAP)
1010 ret = pmem_remap_pfn_range(id, data->vma, data,
1011 region->offset, region->len);
1012 else if (operation == PMEM_UNMAP)
1013 ret = pmem_unmap_pfn_range(id, data->vma, data,
1014 region->offset, region->len);
1018 pmem_unlock_data_and_mm(data, mm);
/* Revoke a sub-file's access when its master goes away (called from
 * pmem_release): garbage-map every region it had mapped and clear its
 * region list and master_file pointer. */
static void pmem_revoke(struct file *file, struct pmem_data *data)
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	/* unmap everything */
	/* delete the regions and region list nothing is mapped any more */
	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node,
		pmem_unmap_pfn_range(id, data->vma, data,
				     region_node->region.offset,
				     region_node->region.len);
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
/* PMEM_GET_SIZE backend: fill @region with the file's allocation start
 * offset and length (the no-allocation branch body is not visible here). */
static void pmem_get_size(struct pmem_region *region, struct file *file)
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	int id = get_id(file);
	if (!has_allocation(file)) {
	region->offset = pmem_start_addr(id, data);
	region->len = pmem_len(id, data);
	DLOG("offset %lx len %lx\n", region->offset, region->len);
1070 static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1072 struct pmem_data *data;
1073 int id = get_id(file);
1078 struct pmem_region region;
1080 if (!has_allocation(file)) {
1084 data = (struct pmem_data *)file->private_data;
1085 region.offset = pmem_start_addr(id, data);
1086 region.len = pmem_len(id, data);
1088 printk(KERN_INFO "pmem: request for physical address of pmem region "
1089 "from process %d.\n", current->pid);
1090 if (copy_to_user((void __user *)arg, ®ion,
1091 sizeof(struct pmem_region)))
1097 struct pmem_region region;
1098 if (copy_from_user(®ion, (void __user *)arg,
1099 sizeof(struct pmem_region)))
1101 data = (struct pmem_data *)file->private_data;
1102 return pmem_remap(®ion, file, PMEM_MAP);
1107 struct pmem_region region;
1108 if (copy_from_user(®ion, (void __user *)arg,
1109 sizeof(struct pmem_region)))
1111 data = (struct pmem_data *)file->private_data;
1112 return pmem_remap(®ion, file, PMEM_UNMAP);
1117 struct pmem_region region;
1119 pmem_get_size(®ion, file);
1120 if (copy_to_user((void __user *)arg, ®ion,
1121 sizeof(struct pmem_region)))
1125 case PMEM_GET_TOTAL_SIZE:
1127 struct pmem_region region;
1128 DLOG("get total size\n");
1131 region.len = pmem[id].size;
1132 if (copy_to_user((void __user *)arg, ®ion,
1133 sizeof(struct pmem_region)))
1139 if (has_allocation(file))
1141 data = (struct pmem_data *)file->private_data;
1142 data->index = pmem_allocate(id, arg);
1147 return pmem_connect(arg, file);
1151 return pmem[id].ioctl(file, cmd, arg);
1158 static ssize_t debug_open(struct inode *inode, struct file *file)
1160 file->private_data = inode->i_private;
/* debugfs read: render one line per open pmem file ("pid N: (offset,len)
 * ...") into a static 4 KiB buffer and copy it out with
 * simple_read_from_buffer.
 * NOTE(review): "int id = (int)file->private_data" truncates a pointer on
 * 64-bit builds (the id was stored as a casted integer by
 * debugfs_create_file) — harmless on 32-bit targets, but verify. */
static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
	struct list_head *elt, *elt2;
	struct pmem_data *data;
	struct pmem_region_node *region_node;
	int id = (int)file->private_data;
	const int debug_bufmax = 4096;
	static char buffer[4096];
	DLOG("debug open\n");
	n = scnprintf(buffer, debug_bufmax,
		      "pid #: mapped regions (offset, len) (offset,len)...\n");
	down(&pmem[id].data_list_sem);
	list_for_each(elt, &pmem[id].data_list) {
		data = list_entry(elt, struct pmem_data, list);
		down_read(&data->sem);
		n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
		list_for_each(elt2, &data->region_list) {
			region_node = list_entry(elt2, struct pmem_region_node,
			n += scnprintf(buffer + n, debug_bufmax - n,
				       region_node->region.offset,
				       region_node->region.len);
		n += scnprintf(buffer + n, debug_bufmax - n, "\n");
		up_read(&data->sem);
	up(&pmem[id].data_list_sem);
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
/* fops for the per-device debugfs file (created in pmem_setup).
 * NOTE(review): the .read = debug_read / .open = debug_open member
 * assignments are not visible in this view — confirm against the full
 * source. */
static struct file_operations debug_fops = {

/* NOTE(review): the member initialisers of this miscdevice are not visible
 * in this view. */
static struct miscdevice pmem_dev = {
/* Bring up one pmem device from platform data: copy config into pmem[id],
 * register the misc device, build the buddy bitmap (seeding one maximal
 * block per set bit of num_entries), ioremap the physical region, allocate
 * the garbage page and expose a debugfs file.  Returns 0 on success, -1 on
 * any failure (unwinding via the goto labels at the bottom).
 * NOTE(review): the assignment of 'id', 'err' and 'i'/'index' declarations
 * are not visible in this view — confirm against the full source. */
int pmem_setup(struct android_pmem_platform_data *pdata,
	       long (*ioctl)(struct file *, unsigned int, unsigned long),
	       int (*release)(struct inode *, struct file *))
	pmem[id].no_allocator = pdata->no_allocator;
	pmem[id].cached = pdata->cached;
	pmem[id].buffered = pdata->buffered;
	pmem[id].base = pdata->start;
	pmem[id].size = pdata->size;
	pmem[id].ioctl = ioctl;
	pmem[id].release = release;
	init_rwsem(&pmem[id].bitmap_sem);
	/* legacy semaphore-as-mutex init (pre-mutex API kernels) */
	init_MUTEX(&pmem[id].data_list_sem);
	INIT_LIST_HEAD(&pmem[id].data_list);
	pmem[id].dev.name = pdata->name;
	pmem[id].dev.minor = id;
	pmem[id].dev.fops = &pmem_fops;
	printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
	err = misc_register(&pmem[id].dev);
	printk(KERN_ALERT "Unable to register pmem driver!\n");
	goto err_cant_register_device;
	pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
	pmem[id].bitmap = kmalloc(pmem[id].num_entries *
				  sizeof(struct pmem_bits), GFP_KERNEL);
	if (!pmem[id].bitmap)
		goto err_no_mem_for_metadata;
	memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
	       pmem[id].num_entries);
	/* seed the buddy bitmap: one maximal free block per set bit in the
	 * (not necessarily power-of-two) entry count */
	for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
		if ((pmem[id].num_entries) & 1<<i) {
			PMEM_ORDER(id, index) = i;
			index = PMEM_NEXT_INDEX(id, index);
	/* map the region cached, buffered or plain per the platform config */
	if (pmem[id].cached)
		pmem[id].vbase = ioremap_cached(pmem[id].base,
#ifdef ioremap_ext_buffered
	else if (pmem[id].buffered)
		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
	pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
	if (pmem[id].vbase == 0)
		goto error_cant_remap;
	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
	if (pmem[id].no_allocator)
		pmem[id].allocated = 0;
	debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
	/* error unwinding */
	kfree(pmem[id].bitmap);
err_no_mem_for_metadata:
	misc_deregister(&pmem[id].dev);
err_cant_register_device:
/* platform_driver probe: validate platform data and hand off to pmem_setup
 * with no custom ioctl/release hooks. */
static int pmem_probe(struct platform_device *pdev)
	struct android_pmem_platform_data *pdata;
	if (!pdev || !pdev->dev.platform_data) {
		printk(KERN_ALERT "Unable to probe pmem!\n");
	pdata = pdev->dev.platform_data;
	return pmem_setup(pdata, NULL, NULL);
/* platform_driver remove: free the garbage page and deregister the misc
 * device.  NOTE(review): the derivation of 'id' (presumably from pdev->id)
 * is not visible in this view — confirm against the full source. */
static int pmem_remove(struct platform_device *pdev)
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	misc_deregister(&pmem[id].dev);
/* Binds to platform devices named "android_pmem" registered by board code. */
static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem" }
/* Module entry/exit: register and unregister the platform driver. */
static int __init pmem_init(void)
	return platform_driver_register(&pmem_driver);

static void __exit pmem_exit(void)
	platform_driver_unregister(&pmem_driver);

module_init(pmem_init);
module_exit(pmem_exit);