1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support. 
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10
11 #include <linux/config.h>
12 #include <linux/mm.h>
13 #include <linux/miscdevice.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/mman.h>
17 #include <linux/random.h>
18 #include <linux/init.h>
19 #include <linux/raw.h>
20 #include <linux/tty.h>
21 #include <linux/capability.h>
22 #include <linux/smp_lock.h>
23 #include <linux/devfs_fs_kernel.h>
24 #include <linux/ptrace.h>
25 #include <linux/device.h>
26 #include <linux/highmem.h>
27 #include <linux/crash_dump.h>
28 #include <linux/backing-dev.h>
29 #include <linux/bootmem.h>
30
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
33
34 #ifdef CONFIG_IA64
35 # include <linux/efi.h>
36 #endif
37
38 /*
39  * Architectures vary in how they handle caching for addresses
40  * outside of main memory.
41  *
42  */
43 static inline int uncached_access(struct file *file, unsigned long addr)
44 {
45 #if defined(__i386__)
46         /*
47          * On the PPro and successors, the MTRRs are used to set
48          * memory types for physical addresses outside main memory,
49          * so blindly setting PCD or PWT on those pages is wrong.
50          * For Pentiums and earlier, the surround logic should disable
51          * caching for the high addresses through the KEN pin, but
52          * we maintain the tradition of paranoia in this code.
53          */
54         if (file->f_flags & O_SYNC)
55                 return 1;
56         return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
57                   test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
58                   test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
59                   test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
60           && addr >= __pa(high_memory);
61 #elif defined(__x86_64__)
62         /* 
63          * This is broken because it can generate memory type aliases,
64          * which can cause cache corruption.
65          * But it is only available to root and we have to be bug-to-bug
66          * compatible with i386.
67          */
68         if (file->f_flags & O_SYNC)
69                 return 1;
70         /* same behaviour as i386. PAT always set to cached and MTRRs control the
71            caching behaviour. 
72            Hopefully a full PAT implementation will fix that soon. */      
73         return 0;
74 #elif defined(CONFIG_IA64)
75         /*
76          * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
77          */
78         return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
79 #else
80         /*
81          * Accessing memory above the highest address the kernel knows about, or through
82          * a file pointer that was marked O_SYNC, will be done non-cached.
83          */
84         if (file->f_flags & O_SYNC)
85                 return 1;
86         return addr >= __pa(high_memory);
87 #endif
88 }
89
90 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
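/*
 * Default implementation: a physical address is valid only if it lies
 * below high_memory, and *count is clamped so the access stays within
 * that range.
 */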
91 static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
92 {
93         unsigned long end_mem;
94
95         end_mem = __pa(high_memory);
96         if (addr >= end_mem)
97                 return 0;
98
99         if (*count > end_mem - addr)
100                 *count = end_mem - addr;
101
102         return 1;
103 }
104
105 static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size)
106 {
107         return 1;
108 }
109 #endif
110
111 /*
112  * This function reads the *physical* memory. The f_pos points directly to the
113  * memory location. 
114  */
115 static ssize_t read_mem(struct file * file, char __user * buf,
116                         size_t count, loff_t *ppos)
117 {
118         unsigned long p = *ppos;
119         ssize_t read, sz;
120         char *ptr;
121
122         if (!valid_phys_addr_range(p, &count))
123                 return -EFAULT;
124         read = 0;
125 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
126         /* we don't have page 0 mapped on sparc and m68k.. */
127         if (p < PAGE_SIZE) {
128                 sz = PAGE_SIZE - p;
129                 if (sz > count) 
130                         sz = count; 
131                 if (sz > 0) {
132                         if (clear_user(buf, sz))
133                                 return -EFAULT;
134                         buf += sz; 
135                         p += sz; 
136                         count -= sz; 
137                         read += sz; 
138                 }
139         }
140 #endif
141
142         while (count > 0) {
143                 /*
144                  * Handle first page in case it's not aligned
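                 * (-p & (PAGE_SIZE - 1) is the number of bytes from p
                 * up to the next page boundary.)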
145                  */
146                 if (-p & (PAGE_SIZE - 1))
147                         sz = -p & (PAGE_SIZE - 1);
148                 else
149                         sz = PAGE_SIZE;
150
151                 sz = min_t(unsigned long, sz, count);
152
153                 /*
154                  * On ia64 if a page has been mapped somewhere as
155                  * uncached, then it must also be accessed uncached
156                  * by the kernel or data corruption may occur
157                  */
158                 ptr = xlate_dev_mem_ptr(p);
159
160                 if (copy_to_user(buf, ptr, sz))
161                         return -EFAULT;
162                 buf += sz;
163                 p += sz;
164                 count -= sz;
165                 read += sz;
166         }
167
168         *ppos += read;
169         return read;
170 }
171
172 static ssize_t write_mem(struct file * file, const char __user * buf, 
173                          size_t count, loff_t *ppos)
174 {
175         unsigned long p = *ppos;
176         ssize_t written, sz;
177         unsigned long copied;
178         void *ptr;
179
180         if (!valid_phys_addr_range(p, &count))
181                 return -EFAULT;
182
183         written = 0;
184
185 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
186         /* we don't have page 0 mapped on sparc and m68k.. */
187         if (p < PAGE_SIZE) {
188                 unsigned long sz = PAGE_SIZE - p;
189                 if (sz > count)
190                         sz = count;
191                 /* Hmm. Do something? */
192                 buf += sz;
193                 p += sz;
194                 count -= sz;
195                 written += sz;
196         }
197 #endif
198
199         while (count > 0) {
200                 /*
201                  * Handle first page in case it's not aligned
202                  */
203                 if (-p & (PAGE_SIZE - 1))
204                         sz = -p & (PAGE_SIZE - 1);
205                 else
206                         sz = PAGE_SIZE;
207
208                 sz = min_t(unsigned long, sz, count);
209
210                 /*
211                  * On ia64 if a page has been mapped somewhere as
212                  * uncached, then it must also be accessed uncached
213                  * by the kernel or data corruption may occur
214                  */
215                 ptr = xlate_dev_mem_ptr(p);
216
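                /*
                 * A short copy_from_user() means part of the user buffer
                 * faulted: report the bytes written so far, or -EFAULT if
                 * nothing was written at all.
                 */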
217                 copied = copy_from_user(ptr, buf, sz);
218                 if (copied) {
219                         ssize_t ret;
220
221                         ret = written + (sz - copied);
222                         if (ret)
223                                 return ret;
224                         return -EFAULT;
225                 }
226                 buf += sz;
227                 p += sz;
228                 count -= sz;
229                 written += sz;
230         }
231
232         *ppos += written;
233         return written;
234 }
235
236 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
237 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
238                                      unsigned long size, pgprot_t vma_prot)
239 {
240 #ifdef pgprot_noncached
241         unsigned long offset = pfn << PAGE_SHIFT;
242
243         if (uncached_access(file, offset))
244                 return pgprot_noncached(vma_prot);
245 #endif
246         return vma_prot;
247 }
248 #endif
249
250 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
251 {
252         size_t size = vma->vm_end - vma->vm_start;
253
254         if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size))
255                 return -EINVAL;
256
257         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
258                                                  size,
259                                                  vma->vm_page_prot);
260
261         /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
262         if (remap_pfn_range(vma,
263                             vma->vm_start,
264                             vma->vm_pgoff,
265                             size,
266                             vma->vm_page_prot))
267                 return -EAGAIN;
268         return 0;
269 }
270
271 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
272 {
273         unsigned long pfn;
274
275         /* Turn a kernel-virtual address into a physical page frame */
276         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
277
278         /*
279          * RED-PEN: on some architectures there is more mapped memory
280          * than available in mem_map which pfn_valid checks
281          * for. Perhaps should add a new macro here.
282          *
283          * RED-PEN: vmalloc is not supported right now.
284          */
285         if (!pfn_valid(pfn))
286                 return -EIO;
287
288         vma->vm_pgoff = pfn;
289         return mmap_mem(file, vma);
290 }
291
292 #ifdef CONFIG_CRASH_DUMP
293 /*
294  * Read memory corresponding to the old kernel.
295  */
296 static ssize_t read_oldmem(struct file *file, char __user *buf,
297                                 size_t count, loff_t *ppos)
298 {
299         unsigned long pfn, offset;
300         size_t read = 0, csize;
301         int rc = 0;
302
303         while (count) {
304                 pfn = *ppos / PAGE_SIZE;
305                 if (pfn > saved_max_pfn)
306                         return read;
307
308                 offset = (unsigned long)(*ppos % PAGE_SIZE);
309                 if (count > PAGE_SIZE - offset)
310                         csize = PAGE_SIZE - offset;
311                 else
312                         csize = count;
313
314                 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
315                 if (rc < 0)
316                         return rc;
317                 buf += csize;
318                 *ppos += csize;
319                 read += csize;
320                 count -= csize;
321         }
322         return read;
323 }
324 #endif
325
326 extern long vread(char *buf, char *addr, unsigned long count);
327 extern long vwrite(char *buf, char *addr, unsigned long count);
328
329 /*
330  * This function reads the *virtual* memory as seen by the kernel.
331  */
332 static ssize_t read_kmem(struct file *file, char __user *buf, 
333                          size_t count, loff_t *ppos)
334 {
335         unsigned long p = *ppos;
336         ssize_t low_count, read, sz;
337         char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
338
339         read = 0;
340         if (p < (unsigned long) high_memory) {
341                 low_count = count;
342                 if (count > (unsigned long) high_memory - p)
343                         low_count = (unsigned long) high_memory - p;
344
345 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
346                 /* we don't have page 0 mapped on sparc and m68k.. */
347                 if (p < PAGE_SIZE && low_count > 0) {
348                         size_t tmp = PAGE_SIZE - p;
349                         if (tmp > low_count) tmp = low_count;
350                         if (clear_user(buf, tmp))
351                                 return -EFAULT;
352                         buf += tmp;
353                         p += tmp;
354                         read += tmp;
355                         low_count -= tmp;
356                         count -= tmp;
357                 }
358 #endif
359                 while (low_count > 0) {
360                         /*
361                          * Handle first page in case it's not aligned
362                          */
363                         if (-p & (PAGE_SIZE - 1))
364                                 sz = -p & (PAGE_SIZE - 1);
365                         else
366                                 sz = PAGE_SIZE;
367
368                         sz = min_t(unsigned long, sz, low_count);
369
370                         /*
371                          * On ia64 if a page has been mapped somewhere as
372                          * uncached, then it must also be accessed uncached
373                          * by the kernel or data corruption may occur
374                          */
375                         kbuf = xlate_dev_kmem_ptr((char *)p);
376
377                         if (copy_to_user(buf, kbuf, sz))
378                                 return -EFAULT;
379                         buf += sz;
380                         p += sz;
381                         read += sz;
382                         low_count -= sz;
383                         count -= sz;
384                 }
385         }
386
387         if (count > 0) {
388                 kbuf = (char *)__get_free_page(GFP_KERNEL);
389                 if (!kbuf)
390                         return -ENOMEM;
391                 while (count > 0) {
392                         int len = count;
393
394                         if (len > PAGE_SIZE)
395                                 len = PAGE_SIZE;
396                         len = vread(kbuf, (char *)p, len);
397                         if (!len)
398                                 break;
399                         if (copy_to_user(buf, kbuf, len)) {
400                                 free_page((unsigned long)kbuf);
401                                 return -EFAULT;
402                         }
403                         count -= len;
404                         buf += len;
405                         read += len;
406                         p += len;
407                 }
408                 free_page((unsigned long)kbuf);
409         }
410         *ppos = p;
411         return read;
412 }
413
414
415 static inline ssize_t
416 do_write_kmem(void *p, unsigned long realp, const char __user * buf,
417               size_t count, loff_t *ppos)
418 {
419         ssize_t written, sz;
420         unsigned long copied;
421
422         written = 0;
423 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
424         /* we don't have page 0 mapped on sparc and m68k.. */
425         if (realp < PAGE_SIZE) {
426                 unsigned long sz = PAGE_SIZE - realp;
427                 if (sz > count)
428                         sz = count;
429                 /* Hmm. Do something? */
430                 buf += sz;
431                 p += sz;
432                 realp += sz;
433                 count -= sz;
434                 written += sz;
435         }
436 #endif
437
438         while (count > 0) {
439                 char *ptr;
440                 /*
441                  * Handle first page in case it's not aligned
442                  */
443                 if (-realp & (PAGE_SIZE - 1))
444                         sz = -realp & (PAGE_SIZE - 1);
445                 else
446                         sz = PAGE_SIZE;
447
448                 sz = min_t(unsigned long, sz, count);
449
450                 /*
451                  * On ia64 if a page has been mapped somewhere as
452                  * uncached, then it must also be accessed uncached
453                  * by the kernel or data corruption may occur
454                  */
455                 ptr = xlate_dev_kmem_ptr(p);
456
457                 copied = copy_from_user(ptr, buf, sz);
458                 if (copied) {
459                         ssize_t ret;
460
461                         ret = written + (sz - copied);
462                         if (ret)
463                                 return ret;
464                         return -EFAULT;
465                 }
466                 buf += sz;
467                 p += sz;
468                 realp += sz;
469                 count -= sz;
470                 written += sz;
471         }
472
473         *ppos += written;
474         return written;
475 }
476
477
478 /*
479  * This function writes to the *virtual* memory as seen by the kernel.
480  */
481 static ssize_t write_kmem(struct file * file, const char __user * buf, 
482                           size_t count, loff_t *ppos)
483 {
484         unsigned long p = *ppos;
485         ssize_t wrote = 0;
486         ssize_t virtr = 0;
487         ssize_t written;
488         char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
489
490         if (p < (unsigned long) high_memory) {
491
492                 wrote = count;
493                 if (count > (unsigned long) high_memory - p)
494                         wrote = (unsigned long) high_memory - p;
495
496                 written = do_write_kmem((void*)p, p, buf, wrote, ppos);
497                 if (written != wrote)
498                         return written;
499                 wrote = written;
500                 p += wrote;
501                 buf += wrote;
502                 count -= wrote;
503         }
504
505         if (count > 0) {
506                 kbuf = (char *)__get_free_page(GFP_KERNEL);
507                 if (!kbuf)
508                         return wrote ? wrote : -ENOMEM;
509                 while (count > 0) {
510                         int len = count;
511
512                         if (len > PAGE_SIZE)
513                                 len = PAGE_SIZE;
514                         if (len) {
515                                 written = copy_from_user(kbuf, buf, len);
516                                 if (written) {
517                                         ssize_t ret;
518
519                                         free_page((unsigned long)kbuf);
520                                         ret = wrote + virtr + (len - written);
521                                         return ret ? ret : -EFAULT;
522                                 }
523                         }
524                         len = vwrite(kbuf, (char *)p, len);
525                         count -= len;
526                         buf += len;
527                         virtr += len;
528                         p += len;
529                 }
530                 free_page((unsigned long)kbuf);
531         }
532
533         *ppos = p;
534         return virtr + wrote;
535 }
536
537 #if defined(CONFIG_ISA) || !defined(__mc68000__)
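/*
 * /dev/port gives byte-wise access to the I/O port space; the file
 * offset selects the port number (0..65535).
 */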
538 static ssize_t read_port(struct file * file, char __user * buf,
539                          size_t count, loff_t *ppos)
540 {
541         unsigned long i = *ppos;
542         char __user *tmp = buf;
543
544         if (!access_ok(VERIFY_WRITE, buf, count))
545                 return -EFAULT; 
546         while (count-- > 0 && i < 65536) {
547                 if (__put_user(inb(i),tmp) < 0) 
548                         return -EFAULT;  
549                 i++;
550                 tmp++;
551         }
552         *ppos = i;
553         return tmp-buf;
554 }
555
556 static ssize_t write_port(struct file * file, const char __user * buf,
557                           size_t count, loff_t *ppos)
558 {
559         unsigned long i = *ppos;
560         const char __user * tmp = buf;
561
562         if (!access_ok(VERIFY_READ,buf,count))
563                 return -EFAULT;
564         while (count-- > 0 && i < 65536) {
565                 char c;
566                 if (__get_user(c, tmp)) 
567                         return -EFAULT; 
568                 outb(c,i);
569                 i++;
570                 tmp++;
571         }
572         *ppos = i;
573         return tmp-buf;
574 }
575 #endif
576
577 static ssize_t read_null(struct file * file, char __user * buf,
578                          size_t count, loff_t *ppos)
579 {
580         return 0;
581 }
582
583 static ssize_t write_null(struct file * file, const char __user * buf,
584                           size_t count, loff_t *ppos)
585 {
586         return count;
587 }
588
589 #ifdef CONFIG_MMU
590 /*
591  * For fun, we are using the MMU for this.
592  */
593 static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
594 {
595         struct mm_struct *mm;
596         struct vm_area_struct * vma;
597         unsigned long addr=(unsigned long)buf;
598
599         mm = current->mm;
600         /* Oops, this was forgotten before. -ben */
601         down_read(&mm->mmap_sem);
602
603         /* For private mappings, just map in zero pages. */
604         for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
605                 unsigned long count;
606
607                 if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
608                         goto out_up;
609                 if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
610                         break;
611                 count = vma->vm_end - addr;
612                 if (count > size)
613                         count = size;
614
615                 zap_page_range(vma, addr, count, NULL);
616                 zeromap_page_range(vma, addr, count, PAGE_COPY);
617
618                 size -= count;
619                 buf += count;
620                 addr += count;
621                 if (size == 0)
622                         goto out_up;
623         }
624
625         up_read(&mm->mmap_sem);
626         
627         /* The shared case is hard. Let's do the conventional zeroing. */ 
628         do {
629                 unsigned long unwritten = clear_user(buf, PAGE_SIZE);
630                 if (unwritten)
631                         return size + unwritten - PAGE_SIZE;
632                 cond_resched();
633                 buf += PAGE_SIZE;
634                 size -= PAGE_SIZE;
635         } while (size);
636
637         return size;
638 out_up:
639         up_read(&mm->mmap_sem);
640         return size;
641 }
642
643 static ssize_t read_zero(struct file * file, char __user * buf, 
644                          size_t count, loff_t *ppos)
645 {
646         unsigned long left, unwritten, written = 0;
647
648         if (!count)
649                 return 0;
650
651         if (!access_ok(VERIFY_WRITE, buf, count))
652                 return -EFAULT;
653
654         left = count;
655
656         /* do we want to be clever? Arbitrary cut-off */
657         if (count >= PAGE_SIZE*4) {
658                 unsigned long partial;
659
660                 /* How much left of the page? */
661                 partial = (PAGE_SIZE-1) & -(unsigned long) buf;
662                 unwritten = clear_user(buf, partial);
663                 written = partial - unwritten;
664                 if (unwritten)
665                         goto out;
666                 left -= partial;
667                 buf += partial;
668                 unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
669                 written += (left & PAGE_MASK) - unwritten;
670                 if (unwritten)
671                         goto out;
672                 buf += left & PAGE_MASK;
673                 left &= ~PAGE_MASK;
674         }
675         unwritten = clear_user(buf, left);
676         written += left - unwritten;
677 out:
678         return written ? written : -EFAULT;
679 }
680
681 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
682 {
683         if (vma->vm_flags & VM_SHARED)
684                 return shmem_zero_setup(vma);
685         if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
686                 return -EAGAIN;
687         return 0;
688 }
689 #else /* CONFIG_MMU */
690 static ssize_t read_zero(struct file * file, char * buf, 
691                          size_t count, loff_t *ppos)
692 {
693         size_t todo = count;
694
695         while (todo) {
696                 size_t chunk = todo;
697
698                 if (chunk > 4096)
699                         chunk = 4096;   /* Just for latency reasons */
700                 if (clear_user(buf, chunk))
701                         return -EFAULT;
702                 buf += chunk;
703                 todo -= chunk;
704                 cond_resched();
705         }
706         return count;
707 }
708
709 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
710 {
711         return -ENOSYS;
712 }
713 #endif /* CONFIG_MMU */
714
715 static ssize_t write_full(struct file * file, const char __user * buf,
716                           size_t count, loff_t *ppos)
717 {
718         return -ENOSPC;
719 }
720
721 /*
722  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
723  * can fopen() both devices with "a" now.  This was previously impossible.
724  * -- SRB.
725  */
726
727 static loff_t null_lseek(struct file * file, loff_t offset, int orig)
728 {
729         return file->f_pos = 0;
730 }
731
732 /*
733  * The memory devices use the full 32/64 bits of the offset, and so we cannot
734  * check against negative addresses: they are ok. The return value is weird,
735  * though, in that case (0).
736  *
737  * Also note that seeking relative to the "end of file" isn't supported:
738  * it has no meaning, so it returns -EINVAL.
739  */
740 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
741 {
742         loff_t ret;
743
744         mutex_lock(&file->f_dentry->d_inode->i_mutex);
745         switch (orig) {
746                 case 0:
747                         file->f_pos = offset;
748                         ret = file->f_pos;
749                         force_successful_syscall_return();
750                         break;
751                 case 1:
752                         file->f_pos += offset;
753                         ret = file->f_pos;
754                         force_successful_syscall_return();
755                         break;
756                 default:
757                         ret = -EINVAL;
758         }
759         mutex_unlock(&file->f_dentry->d_inode->i_mutex);
760         return ret;
761 }
762
763 static int open_port(struct inode * inode, struct file * filp)
764 {
765         return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
766 }
767
768 #define zero_lseek      null_lseek
769 #define full_lseek      null_lseek
770 #define write_zero      write_null
771 #define read_full       read_zero
772 #define open_mem        open_port
773 #define open_kmem       open_mem
774 #define open_oldmem     open_mem
775
776 static struct file_operations mem_fops = {
777         .llseek         = memory_lseek,
778         .read           = read_mem,
779         .write          = write_mem,
780         .mmap           = mmap_mem,
781         .open           = open_mem,
782 };
783
784 static struct file_operations kmem_fops = {
785         .llseek         = memory_lseek,
786         .read           = read_kmem,
787         .write          = write_kmem,
788         .mmap           = mmap_kmem,
789         .open           = open_kmem,
790 };
791
792 static struct file_operations null_fops = {
793         .llseek         = null_lseek,
794         .read           = read_null,
795         .write          = write_null,
796 };
797
798 #if defined(CONFIG_ISA) || !defined(__mc68000__)
799 static struct file_operations port_fops = {
800         .llseek         = memory_lseek,
801         .read           = read_port,
802         .write          = write_port,
803         .open           = open_port,
804 };
805 #endif
806
807 static struct file_operations zero_fops = {
808         .llseek         = zero_lseek,
809         .read           = read_zero,
810         .write          = write_zero,
811         .mmap           = mmap_zero,
812 };
813
814 static struct backing_dev_info zero_bdi = {
815         .capabilities   = BDI_CAP_MAP_COPY,
816 };
817
818 static struct file_operations full_fops = {
819         .llseek         = full_lseek,
820         .read           = read_full,
821         .write          = write_full,
822 };
823
824 #ifdef CONFIG_CRASH_DUMP
825 static struct file_operations oldmem_fops = {
826         .read   = read_oldmem,
827         .open   = open_oldmem,
828 };
829 #endif
830
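/*
 * Writes to /dev/kmsg are injected into the kernel log via printk().
 */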
831 static ssize_t kmsg_write(struct file * file, const char __user * buf,
832                           size_t count, loff_t *ppos)
833 {
834         char *tmp;
835         ssize_t ret;
836
837         tmp = kmalloc(count + 1, GFP_KERNEL);
838         if (tmp == NULL)
839                 return -ENOMEM;
840         ret = -EFAULT;
841         if (!copy_from_user(tmp, buf, count)) {
842                 tmp[count] = 0;
843                 ret = printk("%s", tmp);
844                 if (ret > count)
845                         /* printk can add a prefix */
846                         ret = count;
847         }
848         kfree(tmp);
849         return ret;
850 }
851
852 static struct file_operations kmsg_fops = {
853         .write =        kmsg_write,
854 };
855
856 static int memory_open(struct inode * inode, struct file * filp)
857 {
858         switch (iminor(inode)) {
859                 case 1:
860                         filp->f_op = &mem_fops;
861                         break;
862                 case 2:
863                         filp->f_op = &kmem_fops;
864                         break;
865                 case 3:
866                         filp->f_op = &null_fops;
867                         break;
868 #if defined(CONFIG_ISA) || !defined(__mc68000__)
869                 case 4:
870                         filp->f_op = &port_fops;
871                         break;
872 #endif
873                 case 5:
874                         filp->f_mapping->backing_dev_info = &zero_bdi;
875                         filp->f_op = &zero_fops;
876                         break;
877                 case 7:
878                         filp->f_op = &full_fops;
879                         break;
880                 case 8:
881                         filp->f_op = &random_fops;
882                         break;
883                 case 9:
884                         filp->f_op = &urandom_fops;
885                         break;
886                 case 11:
887                         filp->f_op = &kmsg_fops;
888                         break;
889 #ifdef CONFIG_CRASH_DUMP
890                 case 12:
891                         filp->f_op = &oldmem_fops;
892                         break;
893 #endif
894                 default:
895                         return -ENXIO;
896         }
897         if (filp->f_op && filp->f_op->open)
898                 return filp->f_op->open(inode,filp);
899         return 0;
900 }
901
902 static struct file_operations memory_fops = {
903         .open           = memory_open,  /* just a selector for the real open */
904 };
905
906 static const struct {
907         unsigned int            minor;
908         char                    *name;
909         umode_t                 mode;
910         struct file_operations  *fops;
911 } devlist[] = { /* list of minor devices */
912         {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
913         {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
914         {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
915 #if defined(CONFIG_ISA) || !defined(__mc68000__)
916         {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
917 #endif
918         {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
919         {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
920         {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
921         {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
922         {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
923 #ifdef CONFIG_CRASH_DUMP
924         {12,"oldmem",    S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
925 #endif
926 };
927
928 static struct class *mem_class;
929
930 static int __init chr_dev_init(void)
931 {
932         int i;
933
934         if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
935                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
936
937         mem_class = class_create(THIS_MODULE, "mem");
938         for (i = 0; i < ARRAY_SIZE(devlist); i++) {
939                 class_device_create(mem_class, NULL,
940                                         MKDEV(MEM_MAJOR, devlist[i].minor),
941                                         NULL, devlist[i].name);
942                 devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
943                                 S_IFCHR | devlist[i].mode, devlist[i].name);
944         }
945         
946         return 0;
947 }
948
949 fs_initcall(chr_dev_init);