4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h> /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
25 * Turning this switch on makes it OK to write to the
26 * ring buffer, if the ring buffer itself is enabled.
28 * There are three layers that must be on in order to write
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
35 * In case of an anomaly, this global flag has a bit set that
36 * will permanently disable all ring buffers.
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
60 static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
63 * tracing_on - enable all tracing buffers
65 * This function enables all tracing buffers that may have been
66 * disabled with tracing_off.
70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
74 * tracing_off - turn off all tracing buffers
76 * This function stops all tracing buffers from recording data.
77 * It does not disable any overhead the tracers themselves may
78 * be causing. This function simply causes all recording to
79 * the ring buffers to fail.
81 void tracing_off(void)
83 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
87 * tracing_off_permanent - permanently disable ring buffers
89 * This function, once called, will disable all ring buffers
92 void tracing_off_permanent(void)
94 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
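/*
 * Example (illustrative sketch, not part of the original code): a tracer
 * that wants to quiesce recording around an operation could bracket it
 * with the global switch:
 *
 *	tracing_off();
 *	... inspect or dump the buffers ...
 *	tracing_on();
 *
 * While the switch is off, writes fail at layer 1 above even if the
 * individual ring buffers (layers 2 and 3) are still enabled.
 */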
99 /* Up this if you want to test the TIME_EXTENTS and normalization */
100 #define DEBUG_SHIFT 0
103 u64 ring_buffer_time_stamp(int cpu)
107 preempt_disable_notrace();
108 /* shift to debug/test normalization and TIME_EXTENTS */
109 time = sched_clock() << DEBUG_SHIFT;
110 preempt_enable_notrace();
115 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
117 /* Just for testing the normalize function and deltas */
121 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
122 #define RB_ALIGNMENT_SHIFT 2
123 #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
124 #define RB_MAX_SMALL_DATA 28
127 RB_LEN_TIME_EXTEND = 8,
128 RB_LEN_TIME_STAMP = 16,
131 /* inline for ring buffer fast paths */
132 static inline unsigned
133 rb_event_length(struct ring_buffer_event *event)
137 switch (event->type) {
138 case RINGBUF_TYPE_PADDING:
142 case RINGBUF_TYPE_TIME_EXTEND:
143 return RB_LEN_TIME_EXTEND;
145 case RINGBUF_TYPE_TIME_STAMP:
146 return RB_LEN_TIME_STAMP;
148 case RINGBUF_TYPE_DATA:
150 length = event->len << RB_ALIGNMENT_SHIFT;
152 length = event->array[0];
153 return length + RB_EVNT_HDR_SIZE;
162 * ring_buffer_event_length - return the length of the event
163 * @event: the event to get the length of
165 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
167 return rb_event_length(event);
170 /* inline for ring buffer fast paths */
172 rb_event_data(struct ring_buffer_event *event)
174 BUG_ON(event->type != RINGBUF_TYPE_DATA);
175 /* If length is in len field, then array[0] has the data */
177 return (void *)&event->array[0];
178 /* Otherwise length is in array[0] and array[1] has the data */
179 return (void *)&event->array[1];
183 * ring_buffer_event_data - return the data of the event
184 * @event: the event to get the data from
186 void *ring_buffer_event_data(struct ring_buffer_event *event)
188 return rb_event_data(event);
191 #define for_each_buffer_cpu(buffer, cpu) \
192 for_each_cpu_mask(cpu, buffer->cpumask)
195 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
196 #define TS_DELTA_TEST (~TS_MASK)
199 * This hack stolen from mm/slob.c.
200 * We can store per page timing information in the page frame of the page.
201 * Thanks to Peter Zijlstra for suggesting this idea.
204 u64 time_stamp; /* page time stamp */
205 local_t write; /* index for next write */
206 local_t commit; /* write committed index */
207 unsigned read; /* index for next read */
208 struct list_head list; /* list of free pages */
209 void *page; /* Actual data page */
213 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
216 static inline void free_buffer_page(struct buffer_page *bpage)
219 free_page((unsigned long)bpage->page);
224 * We need to fit the time_stamp delta into 27 bits.
226 static inline int test_time_stamp(u64 delta)
228 if (delta & TS_DELTA_TEST)
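/*
 * A rough sense of scale (illustrative, assuming sched_clock() counts
 * nanoseconds and TS_SHIFT is the 27 bits mentioned above): 2^27 ns is
 * roughly 134 ms, so two events on the same page that are further apart
 * than that cannot encode their delta directly and need a TIME_EXTEND
 * event inserted between them.
 */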
233 #define BUF_PAGE_SIZE PAGE_SIZE
236 * head_page == tail_page && head == tail then buffer is empty.
238 struct ring_buffer_per_cpu {
240 struct ring_buffer *buffer;
241 spinlock_t reader_lock; /* serialize readers */
243 struct lock_class_key lock_key;
244 struct list_head pages;
245 struct buffer_page *head_page; /* read from head */
246 struct buffer_page *tail_page; /* write to tail */
247 struct buffer_page *commit_page; /* committed pages */
248 struct buffer_page *reader_page;
249 unsigned long overrun;
250 unsigned long entries;
253 atomic_t record_disabled;
262 atomic_t record_disabled;
266 struct ring_buffer_per_cpu **buffers;
269 struct ring_buffer_iter {
270 struct ring_buffer_per_cpu *cpu_buffer;
272 struct buffer_page *head_page;
276 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
277 #define RB_WARN_ON(buffer, cond) \
279 int _____ret = unlikely(cond); \
281 atomic_inc(&buffer->record_disabled); \
288 * check_pages - integrity check of buffer pages
289 * @cpu_buffer: CPU buffer with pages to test
291 * As a safety measure we check to make sure the data pages have not
294 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
296 struct list_head *head = &cpu_buffer->pages;
297 struct buffer_page *page, *tmp;
299 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
301 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
304 list_for_each_entry_safe(page, tmp, head, list) {
305 if (RB_WARN_ON(cpu_buffer,
306 page->list.next->prev != &page->list))
308 if (RB_WARN_ON(cpu_buffer,
309 page->list.prev->next != &page->list))
316 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
319 struct list_head *head = &cpu_buffer->pages;
320 struct buffer_page *page, *tmp;
325 for (i = 0; i < nr_pages; i++) {
326 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
327 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
330 list_add(&page->list, &pages);
332 addr = __get_free_page(GFP_KERNEL);
335 page->page = (void *)addr;
338 list_splice(&pages, head);
340 rb_check_pages(cpu_buffer);
345 list_for_each_entry_safe(page, tmp, &pages, list) {
346 list_del_init(&page->list);
347 free_buffer_page(page);
352 static struct ring_buffer_per_cpu *
353 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
355 struct ring_buffer_per_cpu *cpu_buffer;
356 struct buffer_page *page;
360 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
361 GFP_KERNEL, cpu_to_node(cpu));
365 cpu_buffer->cpu = cpu;
366 cpu_buffer->buffer = buffer;
367 spin_lock_init(&cpu_buffer->reader_lock);
368 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
369 INIT_LIST_HEAD(&cpu_buffer->pages);
371 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
372 GFP_KERNEL, cpu_to_node(cpu));
374 goto fail_free_buffer;
376 cpu_buffer->reader_page = page;
377 addr = __get_free_page(GFP_KERNEL);
379 goto fail_free_reader;
380 page->page = (void *)addr;
382 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
384 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
386 goto fail_free_reader;
388 cpu_buffer->head_page
389 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
390 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
395 free_buffer_page(cpu_buffer->reader_page);
402 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
404 struct list_head *head = &cpu_buffer->pages;
405 struct buffer_page *page, *tmp;
407 list_del_init(&cpu_buffer->reader_page->list);
408 free_buffer_page(cpu_buffer->reader_page);
410 list_for_each_entry_safe(page, tmp, head, list) {
411 list_del_init(&page->list);
412 free_buffer_page(page);
418 * Causes compile errors if the struct buffer_page gets bigger
419 * than the struct page.
421 extern int ring_buffer_page_too_big(void);
424 * ring_buffer_alloc - allocate a new ring_buffer
425 * @size: the size in bytes that is needed.
426 * @flags: attributes to set for the ring buffer.
428 * Currently the only flag that is available is the RB_FL_OVERWRITE
429 * flag. This flag means that the buffer will overwrite old data
430 * when the buffer wraps. If this flag is not set, the buffer will
431 * drop data when the tail hits the head.
433 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
435 struct ring_buffer *buffer;
439 /* Paranoid! Optimizes out when all is well */
440 if (sizeof(struct buffer_page) > sizeof(struct page))
441 ring_buffer_page_too_big();
444 /* keep it in its own cache line */
445 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
450 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
451 buffer->flags = flags;
453 /* need at least two pages */
454 if (buffer->pages == 1)
457 buffer->cpumask = cpu_possible_map;
458 buffer->cpus = nr_cpu_ids;
460 bsize = sizeof(void *) * nr_cpu_ids;
461 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
463 if (!buffer->buffers)
464 goto fail_free_buffer;
466 for_each_buffer_cpu(buffer, cpu) {
467 buffer->buffers[cpu] =
468 rb_allocate_cpu_buffer(buffer, cpu);
469 if (!buffer->buffers[cpu])
470 goto fail_free_buffers;
473 mutex_init(&buffer->mutex);
478 for_each_buffer_cpu(buffer, cpu) {
479 if (buffer->buffers[cpu])
480 rb_free_cpu_buffer(buffer->buffers[cpu]);
482 kfree(buffer->buffers);
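/*
 * Example (illustrative sketch): a typical allocation and teardown by a
 * tracer. The one-megabyte size and the error handling are assumptions,
 * not taken from this file:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */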
490 * ring_buffer_free - free a ring buffer.
491 * @buffer: the buffer to free.
494 ring_buffer_free(struct ring_buffer *buffer)
498 for_each_buffer_cpu(buffer, cpu)
499 rb_free_cpu_buffer(buffer->buffers[cpu]);
504 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
507 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
509 struct buffer_page *page;
513 atomic_inc(&cpu_buffer->record_disabled);
516 for (i = 0; i < nr_pages; i++) {
517 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
519 p = cpu_buffer->pages.next;
520 page = list_entry(p, struct buffer_page, list);
521 list_del_init(&page->list);
522 free_buffer_page(page);
524 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
527 rb_reset_cpu(cpu_buffer);
529 rb_check_pages(cpu_buffer);
531 atomic_dec(&cpu_buffer->record_disabled);
536 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
537 struct list_head *pages, unsigned nr_pages)
539 struct buffer_page *page;
543 atomic_inc(&cpu_buffer->record_disabled);
546 for (i = 0; i < nr_pages; i++) {
547 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
550 page = list_entry(p, struct buffer_page, list);
551 list_del_init(&page->list);
552 list_add_tail(&page->list, &cpu_buffer->pages);
554 rb_reset_cpu(cpu_buffer);
556 rb_check_pages(cpu_buffer);
558 atomic_dec(&cpu_buffer->record_disabled);
562 * ring_buffer_resize - resize the ring buffer
563 * @buffer: the buffer to resize.
564 * @size: the new size.
566 * The tracer is responsible for making sure that the buffer is
567 * not being used while changing the size.
568 * Note: We may be able to change the above requirement by using
569 * RCU synchronizations.
571 * Minimum size is 2 * BUF_PAGE_SIZE.
573 * Returns -1 on failure.
575 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
577 struct ring_buffer_per_cpu *cpu_buffer;
578 unsigned nr_pages, rm_pages, new_pages;
579 struct buffer_page *page, *tmp;
580 unsigned long buffer_size;
586 * Always succeed at resizing a non-existent buffer:
591 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
592 size *= BUF_PAGE_SIZE;
593 buffer_size = buffer->pages * BUF_PAGE_SIZE;
595 /* we need a minimum of two pages */
596 if (size < BUF_PAGE_SIZE * 2)
597 size = BUF_PAGE_SIZE * 2;
599 if (size == buffer_size)
602 mutex_lock(&buffer->mutex);
604 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
606 if (size < buffer_size) {
608 /* easy case, just free pages */
609 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
610 mutex_unlock(&buffer->mutex);
614 rm_pages = buffer->pages - nr_pages;
616 for_each_buffer_cpu(buffer, cpu) {
617 cpu_buffer = buffer->buffers[cpu];
618 rb_remove_pages(cpu_buffer, rm_pages);
624 * This is a bit more difficult. We only want to add pages
625 * when we can allocate enough for all CPUs. We do this
626 * by allocating all the pages and storing them on a local
627 * linked list. If we succeed in our allocation, then we
628 * add these pages to the cpu_buffers. Otherwise we just free
629 * them all and return -ENOMEM;
631 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
632 mutex_unlock(&buffer->mutex);
636 new_pages = nr_pages - buffer->pages;
638 for_each_buffer_cpu(buffer, cpu) {
639 for (i = 0; i < new_pages; i++) {
640 page = kzalloc_node(ALIGN(sizeof(*page),
642 GFP_KERNEL, cpu_to_node(cpu));
645 list_add(&page->list, &pages);
646 addr = __get_free_page(GFP_KERNEL);
649 page->page = (void *)addr;
653 for_each_buffer_cpu(buffer, cpu) {
654 cpu_buffer = buffer->buffers[cpu];
655 rb_insert_pages(cpu_buffer, &pages, new_pages);
658 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
659 mutex_unlock(&buffer->mutex);
664 buffer->pages = nr_pages;
665 mutex_unlock(&buffer->mutex);
670 list_for_each_entry_safe(page, tmp, &pages, list) {
671 list_del_init(&page->list);
672 free_buffer_page(page);
674 mutex_unlock(&buffer->mutex);
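/*
 * Example (illustrative sketch): growing each per-cpu buffer to roughly
 * 64 pages. The caller must make sure nothing is writing to the buffer
 * while this runs; the size and the error check are assumptions:
 *
 *	if (ring_buffer_resize(buffer, 64 * BUF_PAGE_SIZE) < 0)
 *		return -ENOMEM;
 */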
678 static inline int rb_null_event(struct ring_buffer_event *event)
680 return event->type == RINGBUF_TYPE_PADDING;
683 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
685 return page->page + index;
688 static inline struct ring_buffer_event *
689 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
691 return __rb_page_index(cpu_buffer->reader_page,
692 cpu_buffer->reader_page->read);
695 static inline struct ring_buffer_event *
696 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
698 return __rb_page_index(cpu_buffer->head_page,
699 cpu_buffer->head_page->read);
702 static inline struct ring_buffer_event *
703 rb_iter_head_event(struct ring_buffer_iter *iter)
705 return __rb_page_index(iter->head_page, iter->head);
708 static inline unsigned rb_page_write(struct buffer_page *bpage)
710 return local_read(&bpage->write);
713 static inline unsigned rb_page_commit(struct buffer_page *bpage)
715 return local_read(&bpage->commit);
718 /* Size is determined by what has been committed */
719 static inline unsigned rb_page_size(struct buffer_page *bpage)
721 return rb_page_commit(bpage);
724 static inline unsigned
725 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
727 return rb_page_commit(cpu_buffer->commit_page);
730 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
732 return rb_page_commit(cpu_buffer->head_page);
736 * When the tail hits the head and the buffer is in overwrite mode,
737 * the head jumps to the next page and all content on the previous
738 * page is discarded. But before doing so, we update the overrun
739 * variable of the buffer.
741 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
743 struct ring_buffer_event *event;
746 for (head = 0; head < rb_head_size(cpu_buffer);
747 head += rb_event_length(event)) {
749 event = __rb_page_index(cpu_buffer->head_page, head);
750 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
752 /* Only count data entries */
753 if (event->type != RINGBUF_TYPE_DATA)
755 cpu_buffer->overrun++;
756 cpu_buffer->entries--;
760 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
761 struct buffer_page **page)
763 struct list_head *p = (*page)->list.next;
765 if (p == &cpu_buffer->pages)
768 *page = list_entry(p, struct buffer_page, list);
771 static inline unsigned
772 rb_event_index(struct ring_buffer_event *event)
774 unsigned long addr = (unsigned long)event;
776 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
780 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
781 struct ring_buffer_event *event)
783 unsigned long addr = (unsigned long)event;
786 index = rb_event_index(event);
789 return cpu_buffer->commit_page->page == (void *)addr &&
790 rb_commit_index(cpu_buffer) == index;
794 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
795 struct ring_buffer_event *event)
797 unsigned long addr = (unsigned long)event;
800 index = rb_event_index(event);
803 while (cpu_buffer->commit_page->page != (void *)addr) {
804 if (RB_WARN_ON(cpu_buffer,
805 cpu_buffer->commit_page == cpu_buffer->tail_page))
807 cpu_buffer->commit_page->commit =
808 cpu_buffer->commit_page->write;
809 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
810 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
813 /* Now set the commit to the event's index */
814 local_set(&cpu_buffer->commit_page->commit, index);
818 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
821 * We only race with interrupts and NMIs on this CPU.
822 * If we own the commit event, then we can commit
823 * all others that interrupted us, since the interruptions
824 * are in stack format (they finish before they come
825 * back to us). This allows us to do a simple loop to
826 * assign the commit to the tail.
828 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
829 cpu_buffer->commit_page->commit =
830 cpu_buffer->commit_page->write;
831 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
832 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
833 /* add barrier to keep gcc from optimizing too much */
836 while (rb_commit_index(cpu_buffer) !=
837 rb_page_write(cpu_buffer->commit_page)) {
838 cpu_buffer->commit_page->commit =
839 cpu_buffer->commit_page->write;
844 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
846 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
847 cpu_buffer->reader_page->read = 0;
850 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
852 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
855 * The iterator could be on the reader page (it starts there).
856 * But the head could have moved, since the reader was
857 * found. Check for this case and assign the iterator
858 * to the head page instead of next.
860 if (iter->head_page == cpu_buffer->reader_page)
861 iter->head_page = cpu_buffer->head_page;
863 rb_inc_page(cpu_buffer, &iter->head_page);
865 iter->read_stamp = iter->head_page->time_stamp;
870 * ring_buffer_update_event - update event type and data
871 * @event: the event to update
872 * @type: the type of event
873 * @length: the size of the event field in the ring buffer
875 * Update the type and data fields of the event. The length
876 * is the actual size that is written to the ring buffer,
877 * and with this, we can determine what to place into the
881 rb_update_event(struct ring_buffer_event *event,
882 unsigned type, unsigned length)
888 case RINGBUF_TYPE_PADDING:
891 case RINGBUF_TYPE_TIME_EXTEND:
893 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
894 >> RB_ALIGNMENT_SHIFT;
897 case RINGBUF_TYPE_TIME_STAMP:
899 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
900 >> RB_ALIGNMENT_SHIFT;
903 case RINGBUF_TYPE_DATA:
904 length -= RB_EVNT_HDR_SIZE;
905 if (length > RB_MAX_SMALL_DATA) {
907 event->array[0] = length;
910 (length + (RB_ALIGNMENT-1))
911 >> RB_ALIGNMENT_SHIFT;
918 static inline unsigned rb_calculate_event_length(unsigned length)
920 struct ring_buffer_event event; /* Used only for sizeof array */
922 /* zero length can cause confusion */
926 if (length > RB_MAX_SMALL_DATA)
927 length += sizeof(event.array[0]);
929 length += RB_EVNT_HDR_SIZE;
930 length = ALIGN(length, RB_ALIGNMENT);
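/*
 * Worked example (illustrative, assuming the 4-byte event header from
 * linux/ring_buffer.h): a request for 6 bytes of payload becomes
 * 6 + RB_EVNT_HDR_SIZE = 10, aligned up to 12. rb_update_event() then
 * stores (12 - 4 + 3) >> 2 = 2 in the len field, and rb_event_length()
 * recovers (2 << 2) + 4 = 12. A 100-byte payload exceeds
 * RB_MAX_SMALL_DATA, so its length is kept in array[0] instead and the
 * total grows by sizeof(event.array[0]) to 108.
 */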
935 static struct ring_buffer_event *
936 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
937 unsigned type, unsigned long length, u64 *ts)
939 struct buffer_page *tail_page, *head_page, *reader_page;
940 unsigned long tail, write;
941 struct ring_buffer *buffer = cpu_buffer->buffer;
942 struct ring_buffer_event *event;
945 tail_page = cpu_buffer->tail_page;
946 write = local_add_return(length, &tail_page->write);
947 tail = write - length;
949 /* See if we shot past the end of this buffer page */
950 if (write > BUF_PAGE_SIZE) {
951 struct buffer_page *next_page = tail_page;
953 local_irq_save(flags);
954 __raw_spin_lock(&cpu_buffer->lock);
956 rb_inc_page(cpu_buffer, &next_page);
958 head_page = cpu_buffer->head_page;
959 reader_page = cpu_buffer->reader_page;
961 /* we grabbed the lock before incrementing */
962 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
966 * If, for some reason, we had an interrupt storm that made
967 * it all the way around the buffer, bail, and warn
970 if (unlikely(next_page == cpu_buffer->commit_page)) {
975 if (next_page == head_page) {
976 if (!(buffer->flags & RB_FL_OVERWRITE)) {
978 if (tail <= BUF_PAGE_SIZE)
979 local_set(&tail_page->write, tail);
983 /* tail_page has not moved yet? */
984 if (tail_page == cpu_buffer->tail_page) {
985 /* count overflows */
986 rb_update_overflow(cpu_buffer);
988 rb_inc_page(cpu_buffer, &head_page);
989 cpu_buffer->head_page = head_page;
990 cpu_buffer->head_page->read = 0;
995 * If the tail page is still the same as what we think
996 * it is, then it is up to us to update the tail
999 if (tail_page == cpu_buffer->tail_page) {
1000 local_set(&next_page->write, 0);
1001 local_set(&next_page->commit, 0);
1002 cpu_buffer->tail_page = next_page;
1004 /* reread the time stamp */
1005 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1006 cpu_buffer->tail_page->time_stamp = *ts;
1010 * The actual tail page has moved forward.
1012 if (tail < BUF_PAGE_SIZE) {
1013 /* Mark the rest of the page with padding */
1014 event = __rb_page_index(tail_page, tail);
1015 event->type = RINGBUF_TYPE_PADDING;
1018 if (tail <= BUF_PAGE_SIZE)
1019 /* Set the write back to the previous setting */
1020 local_set(&tail_page->write, tail);
1023 * If this was a commit entry that failed,
1024 * increment that too
1026 if (tail_page == cpu_buffer->commit_page &&
1027 tail == rb_commit_index(cpu_buffer)) {
1028 rb_set_commit_to_write(cpu_buffer);
1031 __raw_spin_unlock(&cpu_buffer->lock);
1032 local_irq_restore(flags);
1034 /* fail and let the caller try again */
1035 return ERR_PTR(-EAGAIN);
1038 /* We reserved something on the buffer */
1040 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1043 event = __rb_page_index(tail_page, tail);
1044 rb_update_event(event, type, length);
1047 * If this is a commit and the tail is zero, then update
1048 * this page's time stamp.
1050 if (!tail && rb_is_commit(cpu_buffer, event))
1051 cpu_buffer->commit_page->time_stamp = *ts;
1056 __raw_spin_unlock(&cpu_buffer->lock);
1057 local_irq_restore(flags);
1062 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1063 u64 *ts, u64 *delta)
1065 struct ring_buffer_event *event;
1069 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1070 printk(KERN_WARNING "Delta way too big! %llu"
1071 " ts=%llu write stamp = %llu\n",
1072 (unsigned long long)*delta,
1073 (unsigned long long)*ts,
1074 (unsigned long long)cpu_buffer->write_stamp);
1079 * The delta is too big, we need to add a
1082 event = __rb_reserve_next(cpu_buffer,
1083 RINGBUF_TYPE_TIME_EXTEND,
1089 if (PTR_ERR(event) == -EAGAIN)
1092 /* Only a committed time event can update the write stamp */
1093 if (rb_is_commit(cpu_buffer, event)) {
1095 * If this is the first on the page, then we need to
1096 * update the page itself, and just put in a zero.
1098 if (rb_event_index(event)) {
1099 event->time_delta = *delta & TS_MASK;
1100 event->array[0] = *delta >> TS_SHIFT;
1102 cpu_buffer->commit_page->time_stamp = *ts;
1103 event->time_delta = 0;
1104 event->array[0] = 0;
1106 cpu_buffer->write_stamp = *ts;
1107 /* let the caller know this was the commit */
1110 /* Darn, this is just wasted space */
1111 event->time_delta = 0;
1112 event->array[0] = 0;
1121 static struct ring_buffer_event *
1122 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1123 unsigned type, unsigned long length)
1125 struct ring_buffer_event *event;
1132 * We allow for interrupts to reenter here and do a trace.
1133 * If one does, it will cause this original code to loop
1134 * back here. Even with heavy interrupts happening, this
1135 * should only happen a few times in a row. If this happens
1136 * 1000 times in a row, there must be either an interrupt
1137 * storm or we have something buggy.
1140 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1143 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1146 * Only the first commit can update the timestamp.
1147 * Yes there is a race here. If an interrupt comes in
1148 * just after the conditional and it traces too, then it
1149 * will also check the deltas. More than one timestamp may
1150 * also be made. But only the entry that did the actual
1151 * commit will be something other than zero.
1153 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1154 rb_page_write(cpu_buffer->tail_page) ==
1155 rb_commit_index(cpu_buffer)) {
1157 delta = ts - cpu_buffer->write_stamp;
1159 /* make sure this delta is calculated here */
1162 /* Did the write stamp get updated already? */
1163 if (unlikely(ts < cpu_buffer->write_stamp))
1166 if (test_time_stamp(delta)) {
1168 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1170 if (commit == -EBUSY)
1173 if (commit == -EAGAIN)
1176 RB_WARN_ON(cpu_buffer, commit < 0);
1179 /* Non-commits have zero deltas */
1182 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1183 if (PTR_ERR(event) == -EAGAIN)
1187 if (unlikely(commit))
1189 * Ouch! We needed a timestamp and it was committed. But
1190 * we didn't get our event reserved.
1192 rb_set_commit_to_write(cpu_buffer);
1197 * If the timestamp was committed, make the commit our entry
1198 * now so that we will update it when needed.
1201 rb_set_commit_event(cpu_buffer, event);
1202 else if (!rb_is_commit(cpu_buffer, event))
1205 event->time_delta = delta;
1210 static DEFINE_PER_CPU(int, rb_need_resched);
1213 * ring_buffer_lock_reserve - reserve a part of the buffer
1214 * @buffer: the ring buffer to reserve from
1215 * @length: the length of the data to reserve (excluding event header)
1216 * @flags: a pointer to save the interrupt flags
1218 * Returns a reserved event on the ring buffer to copy directly to.
1219 * The user of this interface will need to get the body to write into
1220 * and can use the ring_buffer_event_data() interface.
1222 * The length is the length of the data needed, not the event length
1223 * which also includes the event header.
1225 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1226 * If NULL is returned, then nothing has been allocated or locked.
1228 struct ring_buffer_event *
1229 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1230 unsigned long length,
1231 unsigned long *flags)
1233 struct ring_buffer_per_cpu *cpu_buffer;
1234 struct ring_buffer_event *event;
1237 if (ring_buffer_flags != RB_BUFFERS_ON)
1240 if (atomic_read(&buffer->record_disabled))
1243 /* If we are tracing schedule, we don't want to recurse */
1244 resched = ftrace_preempt_disable();
1246 cpu = raw_smp_processor_id();
1248 if (!cpu_isset(cpu, buffer->cpumask))
1251 cpu_buffer = buffer->buffers[cpu];
1253 if (atomic_read(&cpu_buffer->record_disabled))
1256 length = rb_calculate_event_length(length);
1257 if (length > BUF_PAGE_SIZE)
1260 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1265 * Need to store resched state on this cpu.
1266 * Only the first needs to.
1269 if (preempt_count() == 1)
1270 per_cpu(rb_need_resched, cpu) = resched;
1275 ftrace_preempt_enable(resched);
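/*
 * Example (illustrative sketch): the expected reserve/commit pairing.
 * "buffer" and the payload struct "my_entry" are assumptions for the
 * sake of the example, not names from this file:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long irq_flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &irq_flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, irq_flags);
 */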
1279 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1280 struct ring_buffer_event *event)
1282 cpu_buffer->entries++;
1284 /* Only process further if we own the commit */
1285 if (!rb_is_commit(cpu_buffer, event))
1288 cpu_buffer->write_stamp += event->time_delta;
1290 rb_set_commit_to_write(cpu_buffer);
1294 * ring_buffer_unlock_commit - commit a reserved event
1295 * @buffer: The buffer to commit to
1296 * @event: The event pointer to commit.
1297 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1299 * This commits the data to the ring buffer, and releases any locks held.
1301 * Must be paired with ring_buffer_lock_reserve.
1303 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1304 struct ring_buffer_event *event,
1305 unsigned long flags)
1307 struct ring_buffer_per_cpu *cpu_buffer;
1308 int cpu = raw_smp_processor_id();
1310 cpu_buffer = buffer->buffers[cpu];
1312 rb_commit(cpu_buffer, event);
1315 * Only the last preempt count needs to restore preemption.
1317 if (preempt_count() == 1)
1318 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1320 preempt_enable_no_resched_notrace();
1326 * ring_buffer_write - write data to the buffer without reserving
1327 * @buffer: The ring buffer to write to.
1328 * @length: The length of the data being written (excluding the event header)
1329 * @data: The data to write to the buffer.
1331 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1332 * one function. If you already have the data to write to the buffer, it
1333 * may be easier to simply call this function.
1335 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1336 * and not the length of the event which would hold the header.
1338 int ring_buffer_write(struct ring_buffer *buffer,
1339 unsigned long length,
1342 struct ring_buffer_per_cpu *cpu_buffer;
1343 struct ring_buffer_event *event;
1344 unsigned long event_length;
1349 if (ring_buffer_flags != RB_BUFFERS_ON)
1352 if (atomic_read(&buffer->record_disabled))
1355 resched = ftrace_preempt_disable();
1357 cpu = raw_smp_processor_id();
1359 if (!cpu_isset(cpu, buffer->cpumask))
1362 cpu_buffer = buffer->buffers[cpu];
1364 if (atomic_read(&cpu_buffer->record_disabled))
1367 event_length = rb_calculate_event_length(length);
1368 event = rb_reserve_next_event(cpu_buffer,
1369 RINGBUF_TYPE_DATA, event_length);
1373 body = rb_event_data(event);
1375 memcpy(body, data, length);
1377 rb_commit(cpu_buffer, event);
1381 ftrace_preempt_enable(resched);
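/*
 * Example (illustrative sketch): writing an already-prepared payload in
 * one call instead of the reserve/commit pair above. "buffer" and
 * "payload" are assumptions for the example; a non-zero return means
 * the record was not written:
 *
 *	struct my_entry payload = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(payload), &payload))
 *		... the record was dropped ...
 */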
1386 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1388 struct buffer_page *reader = cpu_buffer->reader_page;
1389 struct buffer_page *head = cpu_buffer->head_page;
1390 struct buffer_page *commit = cpu_buffer->commit_page;
1392 return reader->read == rb_page_commit(reader) &&
1393 (commit == reader ||
1395 head->read == rb_page_commit(commit)));
1399 * ring_buffer_record_disable - stop all writes into the buffer
1400 * @buffer: The ring buffer to stop writes to.
1402 * This prevents all writes to the buffer. Any attempt to write
1403 * to the buffer after this will fail and return NULL.
1405 * The caller should call synchronize_sched() after this.
1407 void ring_buffer_record_disable(struct ring_buffer *buffer)
1409 atomic_inc(&buffer->record_disabled);
1413 * ring_buffer_record_enable - enable writes to the buffer
1414 * @buffer: The ring buffer to enable writes
1416 * Note, multiple disables will need the same number of enables
1417 * to truly enable the writing (much like preempt_disable).
1419 void ring_buffer_record_enable(struct ring_buffer *buffer)
1421 atomic_dec(&buffer->record_disabled);
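/*
 * Example (illustrative sketch): stopping all writers before poking at
 * the buffer contents directly, as suggested by the comments above:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	... read or reset the buffer ...
 *	ring_buffer_record_enable(buffer);
 */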
1425 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1426 * @buffer: The ring buffer to stop writes to.
1427 * @cpu: The CPU buffer to stop
1429 * This prevents all writes to the buffer. Any attempt to write
1430 * to the buffer after this will fail and return NULL.
1432 * The caller should call synchronize_sched() after this.
1434 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1436 struct ring_buffer_per_cpu *cpu_buffer;
1438 if (!cpu_isset(cpu, buffer->cpumask))
1441 cpu_buffer = buffer->buffers[cpu];
1442 atomic_inc(&cpu_buffer->record_disabled);
1446 * ring_buffer_record_enable_cpu - enable writes to the buffer
1447 * @buffer: The ring buffer to enable writes
1448 * @cpu: The CPU to enable.
1450 * Note, multiple disables will need the same number of enables
1451 * to truly enable the writing (much like preempt_disable).
1453 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1455 struct ring_buffer_per_cpu *cpu_buffer;
1457 if (!cpu_isset(cpu, buffer->cpumask))
1460 cpu_buffer = buffer->buffers[cpu];
1461 atomic_dec(&cpu_buffer->record_disabled);
1465 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1466 * @buffer: The ring buffer
1467 * @cpu: The per CPU buffer to get the entries from.
1469 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1471 struct ring_buffer_per_cpu *cpu_buffer;
1473 if (!cpu_isset(cpu, buffer->cpumask))
1476 cpu_buffer = buffer->buffers[cpu];
1477 return cpu_buffer->entries;
1481 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1482 * @buffer: The ring buffer
1483 * @cpu: The per CPU buffer to get the number of overruns from
1485 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1487 struct ring_buffer_per_cpu *cpu_buffer;
1489 if (!cpu_isset(cpu, buffer->cpumask))
1492 cpu_buffer = buffer->buffers[cpu];
1493 return cpu_buffer->overrun;
1497 * ring_buffer_entries - get the number of entries in a buffer
1498 * @buffer: The ring buffer
1500 * Returns the total number of entries in the ring buffer
1503 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1505 struct ring_buffer_per_cpu *cpu_buffer;
1506 unsigned long entries = 0;
1509 /* if you care about this being correct, lock the buffer */
1510 for_each_buffer_cpu(buffer, cpu) {
1511 cpu_buffer = buffer->buffers[cpu];
1512 entries += cpu_buffer->entries;
1519 * ring_buffer_overruns - get the number of overruns in the buffer
1520 * @buffer: The ring buffer
1522 * Returns the total number of overruns in the ring buffer
1525 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1527 struct ring_buffer_per_cpu *cpu_buffer;
1528 unsigned long overruns = 0;
1531 /* if you care about this being correct, lock the buffer */
1532 for_each_buffer_cpu(buffer, cpu) {
1533 cpu_buffer = buffer->buffers[cpu];
1534 overruns += cpu_buffer->overrun;
1540 static void rb_iter_reset(struct ring_buffer_iter *iter)
1542 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1544 /* Iterator usage is expected to have record disabled */
1545 if (list_empty(&cpu_buffer->reader_page->list)) {
1546 iter->head_page = cpu_buffer->head_page;
1547 iter->head = cpu_buffer->head_page->read;
1549 iter->head_page = cpu_buffer->reader_page;
1550 iter->head = cpu_buffer->reader_page->read;
1553 iter->read_stamp = cpu_buffer->read_stamp;
1555 iter->read_stamp = iter->head_page->time_stamp;
1559 * ring_buffer_iter_reset - reset an iterator
1560 * @iter: The iterator to reset
1562 * Resets the iterator, so that it will start from the beginning
1565 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1567 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1568 unsigned long flags;
1570 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1571 rb_iter_reset(iter);
1572 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1576 * ring_buffer_iter_empty - check if an iterator has no more to read
1577 * @iter: The iterator to check
1579 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1581 struct ring_buffer_per_cpu *cpu_buffer;
1583 cpu_buffer = iter->cpu_buffer;
1585 return iter->head_page == cpu_buffer->commit_page &&
1586 iter->head == rb_commit_index(cpu_buffer);
1590 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1591 struct ring_buffer_event *event)
1595 switch (event->type) {
1596 case RINGBUF_TYPE_PADDING:
1599 case RINGBUF_TYPE_TIME_EXTEND:
1600 delta = event->array[0];
1602 delta += event->time_delta;
1603 cpu_buffer->read_stamp += delta;
1606 case RINGBUF_TYPE_TIME_STAMP:
1607 /* FIXME: not implemented */
1610 case RINGBUF_TYPE_DATA:
1611 cpu_buffer->read_stamp += event->time_delta;
1621 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1622 struct ring_buffer_event *event)
1626 switch (event->type) {
1627 case RINGBUF_TYPE_PADDING:
1630 case RINGBUF_TYPE_TIME_EXTEND:
1631 delta = event->array[0];
1633 delta += event->time_delta;
1634 iter->read_stamp += delta;
1637 case RINGBUF_TYPE_TIME_STAMP:
1638 /* FIXME: not implemented */
1641 case RINGBUF_TYPE_DATA:
1642 iter->read_stamp += event->time_delta;
1651 static struct buffer_page *
1652 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1654 struct buffer_page *reader = NULL;
1655 unsigned long flags;
1658 local_irq_save(flags);
1659 __raw_spin_lock(&cpu_buffer->lock);
1663 * This should normally only loop twice. But because the
1664 * start of the reader inserts an empty page, it causes
1665 * a case where we will loop three times. There should be no
1666 * reason to loop four times (that I know of).
1668 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1673 reader = cpu_buffer->reader_page;
1675 /* If there's more to read, return this page */
1676 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1679 /* Never should we have an index greater than the size */
1680 if (RB_WARN_ON(cpu_buffer,
1681 cpu_buffer->reader_page->read > rb_page_size(reader)))
1684 /* check if we caught up to the tail */
1686 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1690 * Splice the empty reader page into the list around the head.
1691 * Reset the reader page to size zero.
1694 reader = cpu_buffer->head_page;
1695 cpu_buffer->reader_page->list.next = reader->list.next;
1696 cpu_buffer->reader_page->list.prev = reader->list.prev;
1698 local_set(&cpu_buffer->reader_page->write, 0);
1699 local_set(&cpu_buffer->reader_page->commit, 0);
1701 /* Make the reader page now replace the head */
1702 reader->list.prev->next = &cpu_buffer->reader_page->list;
1703 reader->list.next->prev = &cpu_buffer->reader_page->list;
1706 * If the tail is on the reader, then we must set the head
1707 * to the inserted page, otherwise we set it one before.
1709 cpu_buffer->head_page = cpu_buffer->reader_page;
1711 if (cpu_buffer->commit_page != reader)
1712 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1714 /* Finally update the reader page to the new head */
1715 cpu_buffer->reader_page = reader;
1716 rb_reset_reader_page(cpu_buffer);
1721 __raw_spin_unlock(&cpu_buffer->lock);
1722 local_irq_restore(flags);
1727 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1729 struct ring_buffer_event *event;
1730 struct buffer_page *reader;
1733 reader = rb_get_reader_page(cpu_buffer);
1735 /* This function should not be called when buffer is empty */
1736 if (RB_WARN_ON(cpu_buffer, !reader))
1739 event = rb_reader_event(cpu_buffer);
1741 if (event->type == RINGBUF_TYPE_DATA)
1742 cpu_buffer->entries--;
1744 rb_update_read_stamp(cpu_buffer, event);
1746 length = rb_event_length(event);
1747 cpu_buffer->reader_page->read += length;
1750 static void rb_advance_iter(struct ring_buffer_iter *iter)
1752 struct ring_buffer *buffer;
1753 struct ring_buffer_per_cpu *cpu_buffer;
1754 struct ring_buffer_event *event;
1757 cpu_buffer = iter->cpu_buffer;
1758 buffer = cpu_buffer->buffer;
1761 * Check if we are at the end of the buffer.
1763 if (iter->head >= rb_page_size(iter->head_page)) {
1764 if (RB_WARN_ON(buffer,
1765 iter->head_page == cpu_buffer->commit_page))
1771 event = rb_iter_head_event(iter);
1773 length = rb_event_length(event);
1776 * This should not be called to advance the header if we are
1777 * at the tail of the buffer.
1779 if (RB_WARN_ON(cpu_buffer,
1780 (iter->head_page == cpu_buffer->commit_page) &&
1781 (iter->head + length > rb_commit_index(cpu_buffer))))
1784 rb_update_iter_read_stamp(iter, event);
1786 iter->head += length;
1788 /* check for end of page padding */
1789 if ((iter->head >= rb_page_size(iter->head_page)) &&
1790 (iter->head_page != cpu_buffer->commit_page))
1791 rb_advance_iter(iter);
1794 static struct ring_buffer_event *
1795 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1797 struct ring_buffer_per_cpu *cpu_buffer;
1798 struct ring_buffer_event *event;
1799 struct buffer_page *reader;
1802 if (!cpu_isset(cpu, buffer->cpumask))
1805 cpu_buffer = buffer->buffers[cpu];
1809 * We repeat when a timestamp is encountered. It is possible
1810 * to get multiple timestamps from an interrupt entering just
1811 * as one timestamp is about to be written. The max times
1812 * that this can happen is the number of nested interrupts we
1813 * can have. Nesting interrupts 10 deep is clearly
1816 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1819 reader = rb_get_reader_page(cpu_buffer);
1823 event = rb_reader_event(cpu_buffer);
1825 switch (event->type) {
1826 case RINGBUF_TYPE_PADDING:
1827 RB_WARN_ON(cpu_buffer, 1);
1828 rb_advance_reader(cpu_buffer);
1831 case RINGBUF_TYPE_TIME_EXTEND:
1832 /* Internal data, OK to advance */
1833 rb_advance_reader(cpu_buffer);
1836 case RINGBUF_TYPE_TIME_STAMP:
1837 /* FIXME: not implemented */
1838 rb_advance_reader(cpu_buffer);
1841 case RINGBUF_TYPE_DATA:
1843 *ts = cpu_buffer->read_stamp + event->time_delta;
1844 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1855 static struct ring_buffer_event *
1856 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1858 struct ring_buffer *buffer;
1859 struct ring_buffer_per_cpu *cpu_buffer;
1860 struct ring_buffer_event *event;
1863 if (ring_buffer_iter_empty(iter))
1866 cpu_buffer = iter->cpu_buffer;
1867 buffer = cpu_buffer->buffer;
1871 * We repeat when a timestamp is encountered. It is possible
1872 * to get multiple timestamps from an interrupt entering just
1873 * as one timestamp is about to be written. The max times
1874 * that this can happen is the number of nested interrupts we
1875 * can have. Nesting interrupts 10 deep is clearly
1878 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1881 if (rb_per_cpu_empty(cpu_buffer))
1884 event = rb_iter_head_event(iter);
1886 switch (event->type) {
1887 case RINGBUF_TYPE_PADDING:
1891 case RINGBUF_TYPE_TIME_EXTEND:
1892 /* Internal data, OK to advance */
1893 rb_advance_iter(iter);
1896 case RINGBUF_TYPE_TIME_STAMP:
1897 /* FIXME: not implemented */
1898 rb_advance_iter(iter);
1901 case RINGBUF_TYPE_DATA:
1903 *ts = iter->read_stamp + event->time_delta;
1904 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1916 * ring_buffer_peek - peek at the next event to be read
1917 * @buffer: The ring buffer to read
1918 * @cpu: The cpu to peek at
1919 * @ts: The timestamp counter of this event.
1921 * This will return the event that will be read next, but does
1922 * not consume the data.
1924 struct ring_buffer_event *
1925 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1927 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1928 struct ring_buffer_event *event;
1929 unsigned long flags;
1931 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1932 event = rb_buffer_peek(buffer, cpu, ts);
1933 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1939 * ring_buffer_iter_peek - peek at the next event to be read
1940 * @iter: The ring buffer iterator
1941 * @ts: The timestamp counter of this event.
1943 * This will return the event that will be read next, but does
1944 * not increment the iterator.
1946 struct ring_buffer_event *
1947 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1949 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1950 struct ring_buffer_event *event;
1951 unsigned long flags;
1953 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1954 event = rb_iter_peek(iter, ts);
1955 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1961 * ring_buffer_consume - return an event and consume it
1962 * @buffer: The ring buffer to get the next event from
1964 * Returns the next event in the ring buffer, and that event is consumed.
1965 * Meaning that sequential reads will keep returning a different event,
1966 * and eventually empty the ring buffer if the producer is slower.
1968 struct ring_buffer_event *
1969 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1971 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1972 struct ring_buffer_event *event;
1973 unsigned long flags;
1975 if (!cpu_isset(cpu, buffer->cpumask))
1978 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1980 event = rb_buffer_peek(buffer, cpu, ts);
1984 rb_advance_reader(cpu_buffer);
1987 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
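/*
 * Example (illustrative sketch): draining one CPU's buffer with the
 * consuming interface. "process_entry" is an assumed helper, not part
 * of this file:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_entry(ring_buffer_event_data(event), ts);
 */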
1993 * ring_buffer_read_start - start a non consuming read of the buffer
1994 * @buffer: The ring buffer to read from
1995 * @cpu: The cpu buffer to iterate over
1997 * This starts up an iteration through the buffer. It also disables
1998 * the recording to the buffer until the reading is finished.
1999 * This prevents the reading from being corrupted. This is not
2000 * a consuming read, so a producer is not expected.
2002 * Must be paired with ring_buffer_read_finish.
2004 struct ring_buffer_iter *
2005 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2007 struct ring_buffer_per_cpu *cpu_buffer;
2008 struct ring_buffer_iter *iter;
2009 unsigned long flags;
2011 if (!cpu_isset(cpu, buffer->cpumask))
2014 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2018 cpu_buffer = buffer->buffers[cpu];
2020 iter->cpu_buffer = cpu_buffer;
2022 atomic_inc(&cpu_buffer->record_disabled);
2023 synchronize_sched();
2025 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2026 __raw_spin_lock(&cpu_buffer->lock);
2027 rb_iter_reset(iter);
2028 __raw_spin_unlock(&cpu_buffer->lock);
2029 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2035 * ring_buffer_read_finish - finish reading the iterator of the buffer
2036 * @iter: The iterator retrieved by ring_buffer_read_start
2038 * This re-enables the recording to the buffer, and frees the
2042 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2044 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2046 atomic_dec(&cpu_buffer->record_disabled);
2051 * ring_buffer_read - read the next item in the ring buffer by the iterator
2052 * @iter: The ring buffer iterator
2053 * @ts: The time stamp of the event read.
2055 * This reads the next event in the ring buffer and increments the iterator.
2057 struct ring_buffer_event *
2058 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2060 struct ring_buffer_event *event;
2061 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2062 unsigned long flags;
2064 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2065 event = rb_iter_peek(iter, ts);
2069 rb_advance_iter(iter);
2071 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
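/*
 * Example (illustrative sketch): a non-consuming walk of one CPU's
 * buffer with the iterator interface. Error handling is trimmed and
 * "process_entry" is an assumed helper:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_entry(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */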
2077 * ring_buffer_size - return the size of the ring buffer (in bytes)
2078 * @buffer: The ring buffer.
2080 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2082 return BUF_PAGE_SIZE * buffer->pages;
2086 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2088 cpu_buffer->head_page
2089 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2090 local_set(&cpu_buffer->head_page->write, 0);
2091 local_set(&cpu_buffer->head_page->commit, 0);
2093 cpu_buffer->head_page->read = 0;
2095 cpu_buffer->tail_page = cpu_buffer->head_page;
2096 cpu_buffer->commit_page = cpu_buffer->head_page;
2098 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2099 local_set(&cpu_buffer->reader_page->write, 0);
2100 local_set(&cpu_buffer->reader_page->commit, 0);
2101 cpu_buffer->reader_page->read = 0;
2103 cpu_buffer->overrun = 0;
2104 cpu_buffer->entries = 0;
2108 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2109 * @buffer: The ring buffer to reset a per cpu buffer of
2110 * @cpu: The CPU buffer to be reset
2112 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2114 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2115 unsigned long flags;
2117 if (!cpu_isset(cpu, buffer->cpumask))
2120 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2122 __raw_spin_lock(&cpu_buffer->lock);
2124 rb_reset_cpu(cpu_buffer);
2126 __raw_spin_unlock(&cpu_buffer->lock);
2128 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2132 * ring_buffer_reset - reset a ring buffer
2133 * @buffer: The ring buffer to reset all cpu buffers
2135 void ring_buffer_reset(struct ring_buffer *buffer)
2139 for_each_buffer_cpu(buffer, cpu)
2140 ring_buffer_reset_cpu(buffer, cpu);
2144 * ring_buffer_empty - is the ring buffer empty?
2145 * @buffer: The ring buffer to test
2147 int ring_buffer_empty(struct ring_buffer *buffer)
2149 struct ring_buffer_per_cpu *cpu_buffer;
2152 /* yes this is racy, but if you don't like the race, lock the buffer */
2153 for_each_buffer_cpu(buffer, cpu) {
2154 cpu_buffer = buffer->buffers[cpu];
2155 if (!rb_per_cpu_empty(cpu_buffer))
2162 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2163 * @buffer: The ring buffer
2164 * @cpu: The CPU buffer to test
2166 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2168 struct ring_buffer_per_cpu *cpu_buffer;
2170 if (!cpu_isset(cpu, buffer->cpumask))
2173 cpu_buffer = buffer->buffers[cpu];
2174 return rb_per_cpu_empty(cpu_buffer);
2178 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2179 * @buffer_a: One buffer to swap with
2180 * @buffer_b: The other buffer to swap with
2182 * This function is useful for tracers that want to take a "snapshot"
2183 * of a CPU buffer and have another backup buffer lying around.
2184 * It is expected that the tracer handles the cpu buffer not being
2185 * used at the moment.
2187 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2188 struct ring_buffer *buffer_b, int cpu)
2190 struct ring_buffer_per_cpu *cpu_buffer_a;
2191 struct ring_buffer_per_cpu *cpu_buffer_b;
2193 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2194 !cpu_isset(cpu, buffer_b->cpumask))
2197 /* At least make sure the two buffers are somewhat the same */
2198 if (buffer_a->size != buffer_b->size ||
2199 buffer_a->pages != buffer_b->pages)
2202 cpu_buffer_a = buffer_a->buffers[cpu];
2203 cpu_buffer_b = buffer_b->buffers[cpu];
2206 * We can't do a synchronize_sched here because this
2207 * function can be called in atomic context.
2208 * Normally this will be called from the same CPU as cpu.
2209 * If not, it's up to the caller to protect this.
2211 atomic_inc(&cpu_buffer_a->record_disabled);
2212 atomic_inc(&cpu_buffer_b->record_disabled);
2214 buffer_a->buffers[cpu] = cpu_buffer_b;
2215 buffer_b->buffers[cpu] = cpu_buffer_a;
2217 cpu_buffer_b->buffer = buffer_a;
2218 cpu_buffer_a->buffer = buffer_b;
2220 atomic_dec(&cpu_buffer_a->record_disabled);
2221 atomic_dec(&cpu_buffer_b->record_disabled);
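/*
 * Example (illustrative sketch): taking a "snapshot" by swapping the
 * live per-cpu buffer with a spare one of the same size, so the spare
 * can be read at leisure. Both buffer names are assumptions:
 *
 *	ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu);
 *	... read snapshot_buffer while live_buffer keeps recording ...
 */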
2227 rb_simple_read(struct file *filp, char __user *ubuf,
2228 size_t cnt, loff_t *ppos)
2230 long *p = filp->private_data;
2234 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2235 r = sprintf(buf, "permanently disabled\n");
2237 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2239 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2243 rb_simple_write(struct file *filp, const char __user *ubuf,
2244 size_t cnt, loff_t *ppos)
2246 long *p = filp->private_data;
2251 if (cnt >= sizeof(buf))
2254 if (copy_from_user(&buf, ubuf, cnt))
2259 ret = strict_strtoul(buf, 10, &val);
2264 set_bit(RB_BUFFERS_ON_BIT, p);
2266 clear_bit(RB_BUFFERS_ON_BIT, p);
2273 static struct file_operations rb_simple_fops = {
2274 .open = tracing_open_generic,
2275 .read = rb_simple_read,
2276 .write = rb_simple_write,
2280 static __init int rb_init_debugfs(void)
2282 struct dentry *d_tracer;
2283 struct dentry *entry;
2285 d_tracer = tracing_init_dentry();
2287 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2288 &ring_buffer_flags, &rb_simple_fops);
2290 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2295 fs_initcall(rb_init_debugfs);