KAISER: Kernel Address Isolation
[pandora-kernel.git] / arch / x86 / kernel / cpu / perf_event_intel_ds.c
index 2d4e76b..fb933cd 100644 (file)
@@ -2,10 +2,14 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 
+#include <asm/kaiser.h>
 #include <asm/perf_event.h>
 
 #include "perf_event.h"
 
+static
+DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE                24
 
@@ -60,6 +64,39 @@ void fini_debug_store_on_cpu(int cpu)
        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
 }
 
+/*
+ * dsalloc - allocate a zeroed debug-store buffer for this CPU.
+ *
+ * Under CONFIG_KAISER the buffer must also be present in the shadow
+ * (user) page tables, because the CPU writes BTS/PEBS records into it
+ * even while the kernel mappings are hidden; kaiser_add_mapping()
+ * mirrors the freshly allocated pages there.
+ *
+ * Returns the buffer address, or NULL on allocation/mapping failure.
+ */
+static void *dsalloc(size_t size, gfp_t flags, int node)
+{
+#ifdef CONFIG_KAISER
+       unsigned int order = get_order(size);
+       struct page *page;
+       unsigned long addr;
+
+       page = alloc_pages_node(node, flags | __GFP_ZERO, order);
+       if (!page)
+               return NULL;
+       addr = (unsigned long)page_address(page);
+       /*
+        * If the shadow mapping cannot be established, release the pages
+        * and fall through with addr == 0 so NULL is returned below.
+        */
+       if (kaiser_add_mapping(addr, size, __PAGE_KERNEL) < 0) {
+               __free_pages(page, order);
+               addr = 0;
+       }
+       return (void *)addr;
+#else
+       /* Without KAISER a plain zeroed node-local kmalloc suffices. */
+       return kmalloc_node(size, flags | __GFP_ZERO, node);
+#endif
+}
+
+/*
+ * dsfree - release a buffer obtained from dsalloc().
+ *
+ * Under CONFIG_KAISER the shadow-page-table mapping installed by
+ * kaiser_add_mapping() must be torn down before the pages are freed.
+ * The NULL check is required here (unlike plain kfree) because
+ * kaiser_remove_mapping() must not be called with a zero address.
+ */
+static void dsfree(const void *buffer, size_t size)
+{
+#ifdef CONFIG_KAISER
+       if (!buffer)
+               return;
+       kaiser_remove_mapping((unsigned long)buffer, size);
+       free_pages((unsigned long)buffer, get_order(size));
+#else
+       kfree(buffer);
+#endif
+}
+
 static int alloc_pebs_buffer(int cpu)
 {
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -70,7 +107,7 @@ static int alloc_pebs_buffer(int cpu)
        if (!x86_pmu.pebs)
                return 0;
 
-       buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+       buffer = dsalloc(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
        if (unlikely(!buffer))
                return -ENOMEM;
 
@@ -94,7 +131,7 @@ static void release_pebs_buffer(int cpu)
        if (!ds || !x86_pmu.pebs)
                return;
 
-       kfree((void *)(unsigned long)ds->pebs_buffer_base);
+       dsfree((void *)(unsigned long)ds->pebs_buffer_base, PEBS_BUFFER_SIZE);
        ds->pebs_buffer_base = 0;
 }
 
@@ -108,7 +145,7 @@ static int alloc_bts_buffer(int cpu)
        if (!x86_pmu.bts)
                return 0;
 
-       buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+       buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL, node);
        if (unlikely(!buffer))
                return -ENOMEM;
 
@@ -132,19 +169,15 @@ static void release_bts_buffer(int cpu)
        if (!ds || !x86_pmu.bts)
                return;
 
-       kfree((void *)(unsigned long)ds->bts_buffer_base);
+       dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE);
        ds->bts_buffer_base = 0;
 }
 
 static int alloc_ds_buffer(int cpu)
 {
-       int node = cpu_to_node(cpu);
-       struct debug_store *ds;
-
-       ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
-       if (unlikely(!ds))
-               return -ENOMEM;
+       /*
+        * The debug_store is now a static per-cpu area (declared with
+        * DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED above) so it is
+        * already user-mapped for KAISER; no dynamic allocation, and
+        * therefore no failure path, remains.
+        */
+       struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);
 
+       memset(ds, 0, sizeof(*ds));
        per_cpu(cpu_hw_events, cpu).ds = ds;
 
        return 0;
@@ -158,7 +191,6 @@ static void release_ds_buffer(int cpu)
                return;
 
        per_cpu(cpu_hw_events, cpu).ds = NULL;
-       kfree(ds);
 }
 
 void release_ds_buffers(void)