diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ea53496..0539f6a 100644
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
 
        if (!pages || !bitmap) {
                if (may_alloc && !pages)
-                       pages = pcpu_mem_alloc(pages_size);
+                       pages = pcpu_mem_zalloc(pages_size);
                if (may_alloc && !bitmap)
-                       bitmap = pcpu_mem_alloc(bitmap_size);
+                       bitmap = pcpu_mem_zalloc(bitmap_size);
                if (!pages || !bitmap)
                        return NULL;
        }
 
-       memset(pages, 0, pages_size);
        bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
 
        *bitmapp = bitmap;
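
Note on the hunk above: pcpu_mem_zalloc() hands back zero-filled memory, which is presumably why the unconditional memset() of the pages array is dropped together with the rename. A minimal sketch of such a zeroing allocator, assuming the usual small-vs-large split; the _sketch name is illustrative only, see pcpu_mem_zalloc() in mm/percpu.c for the real implementation:

#include <linux/slab.h>		/* kzalloc() */
#include <linux/vmalloc.h>	/* vzalloc() */

/* Return size bytes of zeroed memory: slab for small requests,
 * vmalloc for anything larger than a page. */
static void *pcpu_mem_zalloc_sketch(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	return vzalloc(size);
}
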
@@ -109,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            int page_start, int page_end)
 {
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-       unsigned int cpu;
+       unsigned int cpu, tcpu;
        int i;
 
        for_each_possible_cpu(cpu) {
@@ -117,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-                       if (!*pagep) {
-                               pcpu_free_pages(chunk, pages, populated,
-                                               page_start, page_end);
-                               return -ENOMEM;
-                       }
+                       if (!*pagep)
+                               goto err;
                }
        }
        return 0;
+
+err:
+       while (--i >= page_start)
+               __free_page(pages[pcpu_page_idx(cpu, i)]);
+
+       for_each_possible_cpu(tcpu) {
+               if (tcpu == cpu)
+                       break;
+               for (i = page_start; i < page_end; i++)
+                       __free_page(pages[pcpu_page_idx(tcpu, i)]);
+       }
+       return -ENOMEM;
 }
 
 /**
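
The new error path above no longer calls pcpu_free_pages() over the whole [page_start, page_end) range; it frees exactly what this call allocated: first the partially filled range of the cpu that failed, then the full range of every cpu handled before it. A stand-alone sketch of the same rollback pattern, with purely hypothetical names (demo_alloc_all, NR_UNITS, NR_PAGES):

#include <stdlib.h>

#define NR_UNITS 4
#define NR_PAGES 8

/* Allocate one buffer per (unit, page) slot; on failure undo exactly
 * what was allocated so far and nothing else. */
static int demo_alloc_all(void *pages[NR_UNITS][NR_PAGES])
{
	int unit, i, tunit;

	for (unit = 0; unit < NR_UNITS; unit++) {
		for (i = 0; i < NR_PAGES; i++) {
			pages[unit][i] = malloc(4096);
			if (!pages[unit][i])
				goto err;
		}
	}
	return 0;

err:
	while (--i >= 0)			/* partial unit that failed */
		free(pages[unit][i]);
	for (tunit = 0; tunit < unit; tunit++)	/* fully populated units */
		for (i = 0; i < NR_PAGES; i++)
			free(pages[tunit][i]);
	return -1;
}
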
@@ -143,8 +151,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
 {
        flush_cache_vunmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
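
The pcpu_first_unit_cpu/pcpu_last_unit_cpu to pcpu_low_unit_cpu/pcpu_high_unit_cpu rename in this and the later flush hunks reflects that per-cpu units are not guaranteed to be laid out in cpu iteration order: the flush range must span the units with the lowest and highest base offsets, not merely the first and last cpu visited. A simplified, hedged illustration of picking those endpoints (unit_off[] and find_low_high() are made-up names, not the kernel's code):

/* Pick the units with the smallest and largest base offsets. */
static void find_low_high(const unsigned long *unit_off, int nr_units,
			  int *low, int *high)
{
	int u;

	*low = *high = 0;
	for (u = 1; u < nr_units; u++) {
		if (unit_off[u] < unit_off[*low])
			*low = u;
		if (unit_off[u] > unit_off[*high])
			*high = u;
	}
}
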
@@ -206,8 +214,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
 {
        flush_tlb_kernel_range(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -265,6 +273,7 @@ err:
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
+       pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
 }
 
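
With the added pcpu_post_unmap_tlb_flush() call, the failure path of pcpu_map_pages() now flushes the TLB after tearing down the mappings it had already established for earlier cpus, so no stale translations remain once the backing pages are handed back. A hedged sketch of the underlying unmap-then-flush rule (the wrapper name is hypothetical; flush_tlb_kernel_range() is what pcpu_post_unmap_tlb_flush() uses above, and unmap_kernel_range_noflush() is the standard no-flush unmapping primitive in this era):

#include <linux/vmalloc.h>	/* unmap_kernel_range_noflush() */
#include <asm/tlbflush.h>	/* flush_tlb_kernel_range() */

/* Clear the page tables for a kernel range, then flush its TLB entries;
 * only after the flush may the underlying pages be freed or reused. */
static void demo_unmap_and_flush(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
	flush_tlb_kernel_range(addr, addr + (nr_pages << PAGE_SHIFT));
}
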
@@ -284,8 +293,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
 {
        flush_cache_vmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 /**