mm, vmalloc: iterate vmap_area_list in get_vmalloc_info()
authorJoonsoo Kim <js1304@gmail.com>
Mon, 29 Apr 2013 22:07:34 +0000 (15:07 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 29 Apr 2013 22:54:34 +0000 (15:54 -0700)
This patch is a preparatory step for removing vmlist entirely.  To that
end, we change the code that iterates vmlist so that it iterates
vmap_area_list instead.  This is a mostly trivial change, but one thing
should be noted.

vmlist lacks information about some areas in the vmalloc address space.
For example, vm_map_ram() allocates an area in the vmalloc address space,
but it does not link it into vmlist.  Since providing full information
about the vmalloc address space is preferable, we do not use va->vm and
instead use the vmap_area directly.  This makes get_vmalloc_info() more
precise.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmalloc.c

index 59aa328..aee1f61 100644 (file)
@@ -2671,46 +2671,50 @@ module_init(proc_vmalloc_init);
 
 void get_vmalloc_info(struct vmalloc_info *vmi)
 {
-       struct vm_struct *vma;
+       struct vmap_area *va;
        unsigned long free_area_size;
        unsigned long prev_end;
 
        vmi->used = 0;
+       vmi->largest_chunk = 0;
 
-       if (!vmlist) {
-               vmi->largest_chunk = VMALLOC_TOTAL;
-       } else {
-               vmi->largest_chunk = 0;
+       prev_end = VMALLOC_START;
 
-               prev_end = VMALLOC_START;
-
-               read_lock(&vmlist_lock);
+       spin_lock(&vmap_area_lock);
 
-               for (vma = vmlist; vma; vma = vma->next) {
-                       unsigned long addr = (unsigned long) vma->addr;
+       if (list_empty(&vmap_area_list)) {
+               vmi->largest_chunk = VMALLOC_TOTAL;
+               goto out;
+       }
 
-                       /*
-                        * Some archs keep another range for modules in vmlist
-                        */
-                       if (addr < VMALLOC_START)
-                               continue;
-                       if (addr >= VMALLOC_END)
-                               break;
+       list_for_each_entry(va, &vmap_area_list, list) {
+               unsigned long addr = va->va_start;
 
-                       vmi->used += vma->size;
+               /*
+                * Some archs keep another range for modules in vmalloc space
+                */
+               if (addr < VMALLOC_START)
+                       continue;
+               if (addr >= VMALLOC_END)
+                       break;
 
-                       free_area_size = addr - prev_end;
-                       if (vmi->largest_chunk < free_area_size)
-                               vmi->largest_chunk = free_area_size;
+               if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+                       continue;
 
-                       prev_end = vma->size + addr;
-               }
+               vmi->used += (va->va_end - va->va_start);
 
-               if (VMALLOC_END - prev_end > vmi->largest_chunk)
-                       vmi->largest_chunk = VMALLOC_END - prev_end;
+               free_area_size = addr - prev_end;
+               if (vmi->largest_chunk < free_area_size)
+                       vmi->largest_chunk = free_area_size;
 
-               read_unlock(&vmlist_lock);
+               prev_end = va->va_end;
        }
+
+       if (VMALLOC_END - prev_end > vmi->largest_chunk)
+               vmi->largest_chunk = VMALLOC_END - prev_end;
+
+out:
+       spin_unlock(&vmap_area_lock);
 }
 #endif