diff --git a/kernel/pid.c b/kernel/pid.c
index 93e212f..2efe9d8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>
+#include <linux/pid_namespace.h>
 
 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
 static struct hlist_head *pid_hash;
 static int pidhash_shift;
-static kmem_cache_t *pid_cachep;
+static struct kmem_cache *pid_cachep;
 
 int pid_max = PID_MAX_DEFAULT;
-int last_pid;
 
 #define RESERVED_PIDS          300
 
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
 
-#define PIDMAP_ENTRIES         ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
 #define BITS_PER_PAGE          (PAGE_SIZE*8)
 #define BITS_PER_PAGE_MASK     (BITS_PER_PAGE-1)
-#define mk_pid(map, off)       (((map) - pidmap_array)*BITS_PER_PAGE + (off))
+
+static inline int mk_pid(struct pid_namespace *pid_ns,
+               struct pidmap *map, int off)
+{
+       return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
+}
+
 #define find_next_offset(map, off)                                     \
                find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
 
@@ -53,13 +58,16 @@ int pid_max_max = PID_MAX_LIMIT;
  * value does not cause lots of bitmaps to be allocated, but
  * the scheme scales to up to 4 million PIDs, runtime.
  */
-typedef struct pidmap {
-       atomic_t nr_free;
-       void *page;
-} pidmap_t;
-
-static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
-        { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
+struct pid_namespace init_pid_ns = {
+       .kref = {
+               .refcount       = ATOMIC_INIT(2),
+       },
+       .pidmap = {
+               [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
+       },
+       .last_pid = 0,
+       .child_reaper = &init_task
+};
 
 /*
  * Note: disable interrupts while the pidmap_lock is held as an
@@ -74,40 +82,41 @@ static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
  * irq handlers that take it we can leave the interrupts enabled.
  * For now it is easier to be safe than to prove it can't happen.
  */
+
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static fastcall void free_pidmap(int pid)
+static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
 {
-       pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
+       struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
        int offset = pid & BITS_PER_PAGE_MASK;
 
        clear_bit(offset, map->page);
        atomic_inc(&map->nr_free);
 }
 
-static int alloc_pidmap(void)
+static int alloc_pidmap(struct pid_namespace *pid_ns)
 {
-       int i, offset, max_scan, pid, last = last_pid;
-       pidmap_t *map;
+       int i, offset, max_scan, pid, last = pid_ns->last_pid;
+       struct pidmap *map;
 
        pid = last + 1;
        if (pid >= pid_max)
                pid = RESERVED_PIDS;
        offset = pid & BITS_PER_PAGE_MASK;
-       map = &pidmap_array[pid/BITS_PER_PAGE];
+       map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
        max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
        for (i = 0; i <= max_scan; ++i) {
                if (unlikely(!map->page)) {
-                       unsigned long page = get_zeroed_page(GFP_KERNEL);
+                       void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
                        /*
                         * Free the page if someone raced with us
                         * installing it:
                         */
                        spin_lock_irq(&pidmap_lock);
                        if (map->page)
-                               free_page(page);
+                               kfree(page);
                        else
-                               map->page = (void *)page;
+                               map->page = page;
                        spin_unlock_irq(&pidmap_lock);
                        if (unlikely(!map->page))
                                break;
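
(Illustration only.) The kzalloc-outside-the-lock dance above is a common pattern: allocate where sleeping is allowed, then install the result under the spinlock and free it if another CPU won the race. A user-space analogue, with a pthread mutex standing in for pidmap_lock:

	#include <pthread.h>
	#include <stdlib.h>

	static void *map_page;                    /* plays the role of map->page */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void *install_page(size_t size)
	{
		/* Allocate without holding the lock: allocation may sleep/block. */
		void *page = calloc(1, size);

		pthread_mutex_lock(&lock);
		if (map_page)
			free(page);               /* somebody beat us to it */
		else
			map_page = page;          /* we won: publish our page */
		pthread_mutex_unlock(&lock);

		return map_page;                  /* may still be NULL if calloc failed */
	}
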
@@ -116,11 +125,11 @@ static int alloc_pidmap(void)
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->nr_free);
-                                       last_pid = pid;
+                                       pid_ns->last_pid = pid;
                                        return pid;
                                }
                                offset = find_next_offset(map, offset);
-                               pid = mk_pid(map, offset);
+                               pid = mk_pid(pid_ns, map, offset);
                        /*
                         * find_next_offset() found a bit, the pid from it
                         * is in-bounds, and if we fell back to the last
@@ -131,16 +140,34 @@ static int alloc_pidmap(void)
                                        (i != max_scan || pid < last ||
                                            !((last+1) & BITS_PER_PAGE_MASK)));
                }
-               if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
+               if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
                        ++map;
                        offset = 0;
                } else {
-                       map = &pidmap_array[0];
+                       map = &pid_ns->pidmap[0];
                        offset = RESERVED_PIDS;
                        if (unlikely(last == offset))
                                break;
                }
-               pid = mk_pid(map, offset);
+               pid = mk_pid(pid_ns, map, offset);
+       }
+       return -1;
+}
+
+static int next_pidmap(struct pid_namespace *pid_ns, int last)
+{
+       int offset;
+       struct pidmap *map, *end;
+
+       offset = (last + 1) & BITS_PER_PAGE_MASK;
+       map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
+       end = &pid_ns->pidmap[PIDMAP_ENTRIES];
+       for (; map < end; map++, offset = 0) {
+               if (unlikely(!map->page))
+                       continue;
+               offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
+               if (offset < BITS_PER_PAGE)
+                       return mk_pid(pid_ns, map, offset);
        }
        return -1;
 }
@@ -153,6 +180,7 @@ fastcall void put_pid(struct pid *pid)
             atomic_dec_and_test(&pid->count))
                kmem_cache_free(pid_cachep, pid);
 }
+EXPORT_SYMBOL_GPL(put_pid);
 
 static void delayed_put_pid(struct rcu_head *rhp)
 {
@@ -169,7 +197,7 @@ fastcall void free_pid(struct pid *pid)
        hlist_del_rcu(&pid->pid_chain);
        spin_unlock_irqrestore(&pidmap_lock, flags);
 
-       free_pidmap(pid->nr);
+       free_pidmap(current->nsproxy->pid_ns, pid->nr);
        call_rcu(&pid->rcu, delayed_put_pid);
 }
 
@@ -183,7 +211,7 @@ struct pid *alloc_pid(void)
        if (!pid)
                goto out;
 
-       nr = alloc_pidmap();
+       nr = alloc_pidmap(current->nsproxy->pid_ns);
        if (nr < 0)
                goto out_free;
 
@@ -217,15 +245,13 @@ struct pid * fastcall find_pid(int nr)
        }
        return NULL;
 }
+EXPORT_SYMBOL_GPL(find_pid);
 
 int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
 {
        struct pid_link *link;
        struct pid *pid;
 
-       WARN_ON(!task->pid); /* to be removed soon */
-       WARN_ON(!nr); /* to be removed soon */
-
        link = &task->pids[type];
        link->pid = pid = find_pid(nr);
        hlist_add_head_rcu(&link->node, &pid->tasks[type]);
@@ -252,6 +278,15 @@ void fastcall detach_pid(struct task_struct *task, enum pid_type type)
        free_pid(pid);
 }
 
+/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
+void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
+                          enum pid_type type)
+{
+       new->pids[type].pid = old->pids[type].pid;
+       hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
+       old->pids[type].pid = NULL;
+}
+
 struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
 {
        struct task_struct *result = NULL;
@@ -274,6 +309,15 @@ struct task_struct *find_task_by_pid_type(int type, int nr)
 
 EXPORT_SYMBOL(find_task_by_pid_type);
 
+struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+{
+       struct pid *pid;
+       rcu_read_lock();
+       pid = get_pid(task->pids[type].pid);
+       rcu_read_unlock();
+       return pid;
+}
+
 struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
 {
        struct task_struct *result;
@@ -296,6 +340,46 @@ struct pid *find_get_pid(pid_t nr)
        return pid;
 }
 
+/*
+ * Used by proc to find the first pid that is greater than or equal to nr.
+ *
+ * If there is a pid at nr this function is exactly the same as find_pid.
+ */
+struct pid *find_ge_pid(int nr)
+{
+       struct pid *pid;
+
+       do {
+               pid = find_pid(nr);
+               if (pid)
+                       break;
+               nr = next_pidmap(current->nsproxy->pid_ns, nr);
+       } while (nr > 0);
+
+       return pid;
+}
+EXPORT_SYMBOL_GPL(find_get_pid);
+
+int copy_pid_ns(int flags, struct task_struct *tsk)
+{
+       struct pid_namespace *old_ns = tsk->nsproxy->pid_ns;
+       int err = 0;
+
+       if (!old_ns)
+               return 0;
+
+       get_pid_ns(old_ns);
+       return err;
+}
+
+void free_pid_ns(struct kref *kref)
+{
+       struct pid_namespace *ns;
+
+       ns = container_of(kref, struct pid_namespace, kref);
+       kfree(ns);
+}
+
 /*
  * The pid hash table is scaled according to the amount of memory in the
  * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
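
(Illustration only.) A sketch of how a caller such as a /proc readdir loop might walk live pids with the find_ge_pid() helper added earlier in this hunk; the visit_pid() callback and the exact locking are assumptions for this example, not part of the patch:

	/* Visit every live pid in ascending order. */
	static void for_each_live_pid(void (*visit_pid)(struct pid *pid))
	{
		struct pid *pid;
		int nr = 1;

		rcu_read_lock();
		while ((pid = find_ge_pid(nr)) != NULL) {
			visit_pid(pid);           /* hypothetical callback */
			nr = pid->nr + 1;         /* resume just past the pid we saw */
		}
		rcu_read_unlock();
	}
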
@@ -323,10 +407,10 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
-       pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
+       init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        /* Reserve PID 0. We never call free_pidmap(0) */
-       set_bit(0, pidmap_array->page);
-       atomic_dec(&pidmap_array->nr_free);
+       set_bit(0, init_pid_ns.pidmap[0].page);
+       atomic_dec(&init_pid_ns.pidmap[0].nr_free);
 
        pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
                                        __alignof__(struct pid),
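
(Illustration only.) free_pid_ns() above is the release callback for the namespace's kref; the usual get/put pairing, whose exact wrappers in <linux/pid_namespace.h> are assumed here rather than shown in this diff, looks roughly like:

	static inline struct pid_namespace *get_pid_ns_sketch(struct pid_namespace *ns)
	{
		kref_get(&ns->kref);
		return ns;
	}

	static inline void put_pid_ns_sketch(struct pid_namespace *ns)
	{
		/* Dropping to zero runs free_pid_ns(); init_pid_ns starts at 2, so it never does. */
		kref_put(&ns->kref, free_pid_ns);
	}
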