mm: Rearrange struct page
[pandora-kernel.git] / include / linux / mm_types.h
index 2a78aae..3d76a43 100644 (file)
@@ -30,23 +30,60 @@ struct address_space;
  * moment. Note that we have no way to track which tasks are using
  * a page, though if it is a pagecache page, rmap structures can tell us
  * who is mapping it.
+ *
+ * The objects in struct page are organized in double word blocks in
+ * order to allow us to use atomic double word operations on portions
+ * of struct page. That is currently only used by slub but the arrangement
+ * allows the use of atomic double word operations on the flags/mapping
+ * and lru list pointers also.
  */
 struct page {
+       /* First double word block */
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
-       atomic_t _count;                /* Usage count, see below. */
+       struct address_space *mapping;  /* If low bit clear, points to
+                                        * inode address_space, or NULL.
+                                        * If page mapped as anonymous
+                                        * memory, low bit is set, and
+                                        * it points to anon_vma object:
+                                        * see PAGE_MAPPING_ANON below.
+                                        */
+       /* Second double word */
        union {
-               atomic_t _mapcount;     /* Count of ptes mapped in mms,
-                                        * to show when page is mapped
-                                        * & limit reverse map searches.
+               struct {
+                       pgoff_t index;          /* Our offset within mapping. */
+                       atomic_t _mapcount;     /* Count of ptes mapped in mms,
+                                                        * to show when page is mapped
+                                                        * & limit reverse map searches.
+                                                        */
+                       atomic_t _count;                /* Usage count, see below. */
+               };
+
+               struct {                        /* SLUB cmpxchg_double area */
+                       void *freelist;
+                       union {
+                               unsigned long counters;
+                               struct {
+                                       unsigned inuse:16;
+                                       unsigned objects:15;
+                                       unsigned frozen:1;
+                                       /*
+                                        * Kernel may make use of this field even when slub
+                                        * uses the rest of the double word!
                                         */
-               struct {                /* SLUB */
-                       u16 inuse;
-                       u16 objects;
+                                       atomic_t _count;
+                               };
+                       };
                };
        };
+
+       /* Third double word block */
+       struct list_head lru;           /* Pageout list, eg. active_list
+                                        * protected by zone->lru_lock !
+                                        */
+
+       /* Remainder is not double word aligned */
        union {
-           struct {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
@@ -54,27 +91,13 @@ struct page {
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
-               struct address_space *mapping;  /* If low bit clear, points to
-                                                * inode address_space, or NULL.
-                                                * If page mapped as anonymous
-                                                * memory, low bit is set, and
-                                                * it points to anon_vma object:
-                                                * see PAGE_MAPPING_ANON below.
-                                                */
-           };
 #if USE_SPLIT_PTLOCKS
-           spinlock_t ptl;
+               spinlock_t ptl;
 #endif
-           struct kmem_cache *slab;    /* SLUB: Pointer to slab */
-           struct page *first_page;    /* Compound tail pages */
-       };
-       union {
-               pgoff_t index;          /* Our offset within mapping. */
-               void *freelist;         /* SLUB: freelist req. slab lock */
+               struct kmem_cache *slab;        /* SLUB: Pointer to slab */
+               struct page *first_page;        /* Compound tail pages */
        };
-       struct list_head lru;           /* Pageout list, eg. active_list
-                                        * protected by zone->lru_lock !
-                                        */
+
        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
@@ -100,7 +123,16 @@ struct page {
         */
        void *shadow;
 #endif
-};
+}
+/*
+ * If another subsystem starts using the double word pairing for atomic
+ * operations on struct page then it must change the #if to ensure
+ * proper alignment of the page struct.
+ */
+#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
+       __attribute__((__aligned__(2*sizeof(unsigned long))))
+#endif
+;
 
 typedef unsigned long __nocast vm_flags_t;
 
@@ -264,6 +296,8 @@ struct mm_struct {
 
        struct linux_binfmt *binfmt;
 
+       cpumask_var_t cpu_vm_mask_var;
+
        /* Architecture-specific MM context */
        mm_context_t context;
 
@@ -311,10 +345,18 @@ struct mm_struct {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
-
-       cpumask_var_t cpu_vm_mask_var;
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       struct cpumask cpumask_allocation;
+#endif
 };
 
+static inline void mm_init_cpumask(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       mm->cpu_vm_mask_var = &mm->cpumask_allocation;
+#endif
+}
+
 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 {