Merge branch 'linus' into sched/core
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 762aea9..e209f64 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -82,6 +82,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
+#include <linux/swapops.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -321,6 +322,108 @@ static inline void task_context_switch_counts(struct seq_file *m,
                        p->nivcsw);
 }
 
+#ifdef CONFIG_MMU
+
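+/*
+ * Accumulator for the stack page-table walk: @startpage is the page
+ * holding the task's stack start, @usage the stack extent in bytes.
+ */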
+struct stack_stats {
+       struct vm_area_struct *vma;
+       unsigned long   startpage;
+       unsigned long   usage;
+};
+
+static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
+                               unsigned long end, struct mm_walk *walk)
+{
+       struct stack_stats *ss = walk->private;
+       struct vm_area_struct *vma = ss->vma;
+       pte_t *pte, ptent;
+       spinlock_t *ptl;
+       int ret = 0;
+
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+
+#ifdef CONFIG_STACK_GROWSUP
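+               /* stack grows up: remember the highest present or swapped page */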
+               if (pte_present(ptent) || is_swap_pte(ptent))
+                       ss->usage = addr - ss->startpage + PAGE_SIZE;
+#else
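+               /* stack grows down: the first touched page is the deepest one */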
+               if (pte_present(ptent) || is_swap_pte(ptent)) {
+                       ss->usage = ss->startpage - addr + PAGE_SIZE;
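+                       /* bump pte so the unlock below (pte - 1) stays valid */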
+                       pte++;
+                       ret = 1;
+                       break;
+               }
+#endif
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+       return ret;
+}
+
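+/*
+ * Walk the stack VMA and return, in bytes, the span from the stack
+ * start to the deepest page that is present or swapped out.
+ */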
+static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
+                               struct task_struct *task)
+{
+       struct stack_stats ss;
+       struct mm_walk stack_walk = {
+               .pmd_entry = stack_usage_pte_range,
+               .mm = vma->vm_mm,
+               .private = &ss,
+       };
+
+       if (!vma->vm_mm || is_vm_hugetlb_page(vma))
+               return 0;
+
+       ss.vma = vma;
+       ss.startpage = task->stack_start & PAGE_MASK;
+       ss.usage = 0;
+
+#ifdef CONFIG_STACK_GROWSUP
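+       /* scan from the stack pointer page up to the top of the VMA */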
+       walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
+               &stack_walk);
+#else
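+       /* scan from the bottom of the VMA up to the stack pointer page */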
+       walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
+               &stack_walk);
+#endif
+       return ss.usage;
+}
+
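+/* print the per-thread stack usage in kB for /proc/<pid>/status */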
+static inline void task_show_stack_usage(struct seq_file *m,
+                                               struct task_struct *task)
+{
+       struct vm_area_struct   *vma;
+       struct mm_struct        *mm = get_task_mm(task);
+
+       if (mm) {
+               down_read(&mm->mmap_sem);
+               vma = find_vma(mm, task->stack_start);
+               if (vma)
+                       seq_printf(m, "Stack usage:\t%lu kB\n",
+                               get_stack_usage_in_bytes(vma, task) >> 10);
+
+               up_read(&mm->mmap_sem);
+               mmput(mm);
+       }
+}
+#else
+static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+{
+}
+#endif         /* CONFIG_MMU */
+
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
        seq_printf(m, "Cpus_allowed:\t");
@@ -351,6 +440,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
        task_show_regs(m, task);
 #endif
        task_context_switch_counts(m, task);
+       task_show_stack_usage(m, task);
        return 0;
 }
 
@@ -492,7 +582,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                rsslim,
                mm ? mm->start_code : 0,
                mm ? mm->end_code : 0,
-               (permitted && mm) ? mm->start_stack : 0,
+               (permitted) ? task->stack_start : 0,
                esp,
                eip,
                /* The signal information here is obsolete.