/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	/*
	 * Buffers userspace cannot write to never update data_tail; they
	 * are simply overwritten and never run out of space.
	 */
	if (!rb->writable)
		return true;

	mask = perf_data_size(rb) - 1;

	/* Distances from the consumer's tail, modulo the buffer size. */
	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	/* The record would overtake the tail: not enough space. */
	if ((int)(head - offset) < 0)
		return false;

	return true;
}

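/*
 * Worked example for the check above (illustrative, values invented):
 * with a 32KiB data area (mask 0x7fff), tail = 0x0400 and a record
 * spanning offset = 0x7e00 to head = 0x8600, the masked distances are
 * offset - tail = 0x7a00 and head - tail = 0x0200, so (int)(head - offset)
 * goes negative: the record would overwrite data userspace has not
 * consumed yet.
 */
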
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   READ ->data_tail			READ ->data_head
	 *   smp_mb()	(A)			smp_rmb()	(C)
	 *   WRITE $data			READ $data
	 *   smp_wmb()	(B)			smp_mb()	(D)
	 *   STORE ->data_head			WRITE ->data_tail
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * I don't think A needs to be a full barrier because we won't in fact
	 * write data until we see the store from userspace. So we simply don't
	 * issue the data WRITE until we observe it. Be conservative for now.
	 *
	 * OTOH, D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B; publishes the data before the new head */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update; rely on the (compiler)
	 * barrier in atomic_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

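/*
 * For reference, the matching userspace side of the pairing documented
 * above might look like this (illustrative sketch only; "pc" is assumed
 * to point at the mmap()ed struct perf_event_mmap_page):
 *
 *	u64 head = ACCESS_ONCE(pc->data_head);
 *	smp_rmb();				// C: pairs with B
 *	... read records in [pc->data_tail, head) ...
 *	smp_mb();				// D: pairs with A
 *	pc->data_tail = head;
 */
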
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb	= rb;
	handle->event	= event;

	if (!rb->nr_pages)
		goto out;

	/* If earlier records were dropped, make room for a PERF_RECORD_LOST. */
	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer, so that all reads will be completed before
		 * the write.
		 *
		 * See perf_output_put_handle().
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_mb(); /* A; pairs with D */
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/* Arm the next wakeup one watermark past the previous one. */
	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

	/*
	 * Translate the linear offset into a data page, an address within
	 * that page, and the room left in it.
	 */
	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

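	/*
	 * For example, with order-0 pages, PAGE_SIZE 4096 and offset 5000:
	 * handle->page = 1, handle->addr = data_pages[1] + 904, and
	 * handle->size = 3192 bytes remain in that page before the copy
	 * routine must step to the next one.
	 */
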
	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

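/*
 * Illustrative usage from an event-delivery path (error handling
 * elided); perf_output_put() copies a fixed-size object through
 * perf_output_copy():
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	... further perf_output_put()/perf_output_copy() calls ...
 *	perf_output_end(&handle);
 */
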
void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	__output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->writable = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}

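/*
 * E.g. a 16-page buffer of 4KiB pages has max_size 64KiB; with no
 * explicit watermark, rb->watermark defaults to 32KiB, so
 * perf_output_begin() arms a wakeup roughly every 32KiB of output.
 */
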
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

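/*
 * The mmap() layout this implements: pgoff 0 is the control page
 * (struct perf_event_mmap_page), and pgoff n for n in [1, nr_pages]
 * maps data_pages[n - 1].
 */
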
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	/* cpu == -1 doubles as "no NUMA preference". */
	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	/* One trailing data_pages[] pointer per data page. */
	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else

/*
 * Back perf_mmap() with vmalloc memory: the control page and all data
 * pages live in one contiguous virtual mapping.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(rb)))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = 1 << page_order(rb);

	/* Unmark the control page plus the 2^order data pages, then free. */
	base = rb->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	/*
	 * rb_free() may be called from contexts where vfree() is not
	 * allowed, so defer the actual freeing to a workqueue.
	 */
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = 1;	/* one high-order data "page" */

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
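
/*
 * Either backend reports the same geometry: perf_data_size() evaluates
 * nr_pages << (PAGE_SHIFT + page_order(rb)), so the vmalloc variant's
 * single high-order "page" covers what the page-array variant spreads
 * over nr_pages order-0 pages.
 */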