Merge branch 'drm-ttm-unmappable' into drm-core-next
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05a9f83..41ca394 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
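The new <linux/slab.h> include tracks the tree-wide cleanup that stops <linux/percpu.h> from dragging slab.h in implicitly: ring_buffer.c allocates its buffer structures with kzalloc()/kfree(), so it now has to name the header itself. A minimal sketch of the dependency, with invented structure and function names (this is not the real ring buffer code):

	#include <linux/slab.h>		/* declares kzalloc() and kfree() */

	/* illustrative stand-in for the real per-cpu buffer structures */
	struct demo_cpu_buffer {
		int cpu;
	};

	static struct demo_cpu_buffer *demo_alloc_cpu_buffer(int cpu)
	{
		/* this is the call that breaks once slab.h stops being implicit */
		struct demo_cpu_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (b)
			b->cpu = cpu;
		return b;
	}

	static void demo_free_cpu_buffer(struct demo_cpu_buffer *b)
	{
		kfree(b);
	}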
@@ -207,6 +208,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE       8U      /* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT      0
+# define RB_ARCH_ALIGNMENT             RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT      1
+# define RB_ARCH_ALIGNMENT             8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
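RB_ARCH_ALIGNMENT is the unit that event sizes are rounded up to, and RB_FORCE_8BYTE_ALIGNMENT turns on the stricter layout for 64-bit architectures without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, where the 64-bit values carried in event payloads (time stamps among them) must not land on a 4-byte boundary. For reference, the event header from include/linux/ring_buffer.h of this era looks roughly like this (kmemcheck annotations omitted):

	struct ring_buffer_event {
		u32	type_len:5, time_delta:27;	/* 4-byte header */
		u32	array[];			/* length word and/or payload */
	};

With type_len left at zero the payload length goes into array[0], so the data itself starts 8 bytes into the event; forcing that encoding plus 8-byte event sizes keeps any 64-bit payload member naturally aligned.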
 
@@ -1201,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-                       return;
+                       goto out;
                p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-               return;
+               goto out;
 
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
 
+out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
 }
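The rb_remove_pages() hunk is a locking fix rather than part of the alignment work: both early returns used to bail out with cpu_buffer->reader_lock still held and interrupts still disabled, so the warning paths now jump to a common unlock label, and the next two hunks give rb_insert_pages() the same treatment. A self-contained sketch of the pattern, with invented names (not the real ring buffer code):

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/bug.h>

	struct demo_buffer {
		spinlock_t	lock;
		struct list_head pages;
	};

	static void demo_remove_pages(struct demo_buffer *buf, unsigned int nr_pages)
	{
		unsigned int i;

		spin_lock_irq(&buf->lock);
		for (i = 0; i < nr_pages; i++) {
			/* a bare "return" here would leave buf->lock held with IRQs off */
			if (WARN_ON(list_empty(&buf->pages)))
				goto out;
			list_del_init(buf->pages.next);
		}
	out:
		spin_unlock_irq(&buf->lock);
	}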
 
@@ -1229,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-                       return;
+                       goto out;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
@@ -1238,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
 
+out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1547,7 +1558,7 @@ rb_update_event(struct ring_buffer_event *event,
 
        case 0:
                length -= RB_EVNT_HDR_SIZE;
-               if (length > RB_MAX_SMALL_DATA)
+               if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                        event->array[0] = length;
                else
                        event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
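In the RINGBUF_TYPE_DATA case shown above, rb_update_event() normally packs small payload sizes into the 5-bit type_len field; with RB_FORCE_8BYTE_ALIGNMENT it now always stores the size in array[0] and leaves type_len at zero, so readers locate the data one word later. A rough sketch of the consumer side this keeps aligned, simplified from rb_event_data() in this file (sanity checks dropped, name invented):

	static void *sketch_event_data(struct ring_buffer_event *event)
	{
		/* non-zero type_len: size is encoded there, data starts at array[0] */
		if (event->type_len)
			return (void *)&event->array[0];
		/* type_len == 0: array[0] holds the size, data starts at array[1] */
		return (void *)&event->array[1];
	}

Because the header and the length word are 4 bytes each, &event->array[1] sits at an 8-byte offset, and with event sizes rounded to RB_ARCH_ALIGNMENT every event begins on an 8-byte boundary as well.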
@@ -1722,11 +1733,11 @@ static unsigned rb_calculate_event_length(unsigned length)
        if (!length)
                length = 1;
 
-       if (length > RB_MAX_SMALL_DATA)
+       if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                length += sizeof(event.array[0]);
 
        length += RB_EVNT_HDR_SIZE;
-       length = ALIGN(length, RB_ALIGNMENT);
+       length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
        return length;
 }
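Worked through with the forced alignment in effect, the size computation looks like the sketch below (helper name invented; the constants are the ones defined earlier in the file):

	/*
	 * Illustration only: what rb_calculate_event_length() computes when
	 * RB_FORCE_8BYTE_ALIGNMENT == 1, i.e. RB_ARCH_ALIGNMENT == 8U.
	 */
	static unsigned sketch_event_length(unsigned data_len)
	{
		unsigned length = data_len ? data_len : 1;

		length += sizeof(u32);		/* size always stored in array[0] */
		length += RB_EVNT_HDR_SIZE;	/* 4-byte type_len/time_delta header */
		return ALIGN(length, RB_ARCH_ALIGNMENT);
	}

For example, a 10-byte payload becomes 10 + 4 + 4 = 18 bytes, rounded up to 24; the old 4-byte alignment would have packed the same event into 16 bytes.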