/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

static struct kobj_map *cdev_map;

#define MAX_PROBE_HASH 255      /* random */

static DECLARE_MUTEX(chrdevs_lock);

static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        const char *name;
        struct file_operations *fops;
        struct cdev *cdev;              /* will die */
} *chrdevs[MAX_PROBE_HASH];

/* index in the above */
static inline int major_to_index(int major)
{
        return major % MAX_PROBE_HASH;
}

/* get char device names in somewhat random order */
int get_chrdev_list(char *page)
{
        struct char_device_struct *cd;
        int i, len;

        len = sprintf(page, "Character devices:\n");

        down(&chrdevs_lock);
        for (i = 0; i < ARRAY_SIZE(chrdevs); i++) {
                for (cd = chrdevs[i]; cd; cd = cd->next) {
                        /*
                         * if the current name, plus the 5 extra characters
                         * in the device line for this entry
                         * would run us off the page, we're done
                         */
                        if ((len + strlen(cd->name) + 5) >= PAGE_SIZE)
                                goto page_full;

                        len += sprintf(page+len, "%3d %s\n",
                                       cd->major, cd->name);
                }
        }
page_full:
        up(&chrdevs_lock);

        return len;
}
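
/*
 * Illustrative sketch (not part of this file): the buffer filled in above
 * is what userspace sees as the "Character devices:" section of
 * /proc/devices.  The exact entries depend on which drivers have
 * registered; the "%3d %s" format produces lines such as:
 *
 *      Character devices:
 *        1 mem
 *        4 tty
 *        5 /dev/tty
 *       10 misc
 */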

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                           int minorct, const char *name)
{
        struct char_device_struct *cd, **cp;
        int ret = 0;
        int i;

        cd = kmalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        memset(cd, 0, sizeof(struct char_device_struct));

        down(&chrdevs_lock);

        /* temporary */
        if (major == 0) {
                for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
                        if (chrdevs[i] == NULL)
                                break;
                }

                if (i == 0) {
                        ret = -EBUSY;
                        goto out;
                }
                major = i;
                ret = major;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        cd->name = name;

        i = major_to_index(major);

        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major > major ||
                    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
                        break;
        if (*cp && (*cp)->major == major &&
            (*cp)->baseminor < baseminor + minorct) {
                ret = -EBUSY;
                goto out;
        }
        cd->next = *cp;
        *cp = cd;
        up(&chrdevs_lock);
        return cd;
out:
        up(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        down(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        up(&chrdevs_lock);
        return cd;
}

int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                               next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}
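
/*
 * Usage sketch (illustrative, not part of this file): a driver that owns a
 * fixed major number reserves its whole minor range up front and releases
 * it on teardown.  The major number 240 and the name "exdev" are
 * hypothetical.
 *
 *      int err = register_chrdev_region(MKDEV(240, 0), 16, "exdev");
 *      if (err)
 *              return err;
 *      ...
 *      unregister_chrdev_region(MKDEV(240, 0), 16);
 */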

int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}
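
/*
 * Usage sketch (illustrative, not part of this file): when no major has
 * been assigned, let the kernel pick one and read it back from the returned
 * dev_t.  The name "exdev" is hypothetical.
 *
 *      dev_t devt;
 *      int err = alloc_chrdev_region(&devt, 0, 16, "exdev");
 *      if (err)
 *              return err;
 *      printk(KERN_INFO "exdev: got major %d\n", MAJOR(devt));
 *      ...
 *      unregister_chrdev_region(devt, 16);
 */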

int register_chrdev(unsigned int major, const char *name,
                    struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        char *s;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, 0, 256, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);
        for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
                *s = '!';

        err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, 0, 256));
        return err;
}
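
/*
 * Usage sketch (illustrative, not part of this file): this old-style
 * interface reserves minors 0-255 of the given major and sets up an
 * internal cdev in one call.  Passing major == 0 requests a dynamic major,
 * which is then returned.  The fops and handlers below are hypothetical.
 *
 *      static struct file_operations exdev_fops = {
 *              .owner  = THIS_MODULE,
 *              .open   = exdev_open,
 *              .read   = exdev_read,
 *      };
 *
 *      int major = register_chrdev(0, "exdev", &exdev_fops);
 *      if (major < 0)
 *              return major;
 */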

void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}

int unregister_chrdev(unsigned int major, const char *name)
{
        struct char_device_struct *cd;
        cd = __unregister_chrdev_region(major, 0, 256);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
        return 0;
}
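
/*
 * Teardown sketch (illustrative, not part of this file), matching the
 * register_chrdev() example above: this drops the internal cdev and frees
 * the 256-minor reservation.  Note that in this version the name argument
 * is not checked against the registered entry.
 *
 *      unregister_chrdev(major, "exdev");
 */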

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (owner && !try_module_get(owner))
                return NULL;
        kobj = kobject_get(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}

void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}

/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode *inode, struct file *filp)
{
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        inode->i_cindex = idx;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;
        filp->f_op = fops_get(p->ops);
        if (!filp->f_op) {
                cdev_put(p);
                return -ENXIO;
        }
        if (filp->f_op->open) {
                lock_kernel();
                ret = filp->f_op->open(inode, filp);
                unlock_kernel();
        }
        if (ret)
                cdev_put(p);
        return ret;
}

void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
struct file_operations def_chr_fops = {
        .open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}

int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        p->dev = dev;
        p->count = count;
        return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}

static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
        kfree(p);
}

static struct kobj_type ktype_cdev_default = {
        .release        = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release        = cdev_dynamic_release,
};

struct cdev *cdev_alloc(void)
{
        struct cdev *p = kmalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                memset(p, 0, sizeof(struct cdev));
                p->kobj.ktype = &ktype_cdev_dynamic;
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj);
        }
        return p;
}
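
/*
 * Usage sketch (illustrative, not part of this file): a standalone cdev
 * obtained from cdev_alloc() uses ktype_cdev_dynamic, so the final
 * kobject_put() from cdev_del() frees the structure itself.  The fops and
 * device numbers below are hypothetical.
 *
 *      struct cdev *c = cdev_alloc();
 *      if (!c)
 *              return -ENOMEM;
 *      c->owner = THIS_MODULE;
 *      c->ops = &exdev_fops;
 *      err = cdev_add(c, devt, 16);
 *      if (err) {
 *              kobject_put(&c->kobj);
 *              return err;
 *      }
 *      ...
 *      cdev_del(c);
 */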

void cdev_init(struct cdev *cdev, struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        cdev->kobj.ktype = &ktype_cdev_default;
        kobject_init(&cdev->kobj);
        cdev->ops = fops;
}
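
/*
 * Usage sketch (illustrative, not part of this file): the common pattern is
 * to embed the cdev in a driver-private structure, initialise it with
 * cdev_init() and make it live with cdev_add().  Because ktype_cdev_default
 * does not kfree() the cdev, the containing structure's lifetime is the
 * driver's responsibility.  The names below are hypothetical.
 *
 *      struct exdev {
 *              struct cdev cdev;
 *              ...
 *      };
 *
 *      cdev_init(&ex->cdev, &exdev_fops);
 *      ex->cdev.owner = THIS_MODULE;
 *      err = cdev_add(&ex->cdev, devt, 1);
 *      if (err)
 *              return err;
 *      ...
 *      cdev_del(&ex->cdev);
 */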

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* Make old-style 2.4 aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}
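
/*
 * Autoload sketch (illustrative, not part of this file): base_probe() asks
 * kmod for "char-major-<major>-<minor>" and falls back to the 2.4-style
 * "char-major-<major>" alias, so a module can make itself loadable on first
 * open by declaring a matching alias.  Major 10, minor 130 are hypothetical.
 *
 *      MODULE_ALIAS("char-major-10-130");
 */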

void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}

/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);