/*
 * kernel/events/ring_buffer.c
 *
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
                              unsigned long offset, unsigned long head)
{
        unsigned long mask;

        if (!rb->writable)
                return true;

        mask = perf_data_size(rb) - 1;

        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;

        if ((int)(head - offset) < 0)
                return false;

        return true;
}
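
/*
 * Worked example with illustrative numbers: for an 8-page buffer with 4 KiB
 * pages, perf_data_size() is 32768 and mask is 32767. With tail = 1000,
 * offset = 32000 and head = 33000, the masked distances from tail become
 * offset = 31000 and head = 32000; head - offset = 1000 >= 0, so the new
 * data still fits without overwriting unread data and we return true. Had
 * head wrapped past tail (say head = 34000, masked distance 232), the
 * difference would go negative and we would return false.
 */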

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLL_IN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   READ ->data_tail                   READ ->data_head
         *   smp_mb()   (A)                     smp_rmb()       (C)
         *   WRITE $data                        READ $data
         *   smp_wmb()  (B)                     smp_mb()        (D)
         *   STORE ->data_head                  WRITE ->data_tail
         *
         * Where A pairs with D, and B pairs with C.
         *
         * I don't think A needs to be a full barrier because we won't in fact
         * write data until we see the store from userspace, so we simply don't
         * issue the data WRITE until we observe it. For now, stay conservative
         * and keep the full barrier.
         *
         * OTOH, D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for C
         * an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin().
         */
        smp_wmb();
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update; rely on the (compiler)
         * barrier in local_dec_and_test() to re-read rb->head.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
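
/*
 * For reference, a minimal sketch of the matching userspace consumer loop,
 * i.e. the "user" column of the ordering table above. It assumes the buffer
 * was mmap()ed with the control page (struct perf_event_mmap_page) at @base
 * and the data area starting one page later; page_size and data_size are
 * assumed to be defined by the consumer, and read_barrier()/full_barrier()
 * are placeholders for the architecture's smp_rmb()/smp_mb() equivalents.
 * Records that wrap around the end of the data area are ignored here to
 * keep the sketch short:
 *
 *	struct perf_event_mmap_page *up = base;
 *	char *data = (char *)base + page_size;
 *	u64 tail = up->data_tail;
 *	u64 head = up->data_head;
 *
 *	read_barrier();				// (C), pairs with (B)
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (void *)(data + (tail & (data_size - 1)));
 *		// ... consume the record at hdr ...
 *		tail += hdr->size;
 *	}
 *	full_barrier();				// (D), pairs with (A)
 *	up->data_tail = tail;
 */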

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost;
        struct perf_sample_data sample_data;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (!rb)
                goto out;

        handle->rb      = rb;
        handle->event   = event;

        if (!rb->nr_pages)
                goto out;

        have_lost = local_read(&rb->lost);
        if (have_lost) {
                lost_event.header.size = sizeof(lost_event);
                perf_event_header__init_id(&lost_event.header, &sample_data,
                                           event);
                size += lost_event.header.size;
        }

        perf_output_get_handle(handle);

        do {
                /*
                 * Userspace could choose to issue an mb() before updating the
                 * tail pointer, so that all reads complete before the tail
                 * write is issued.
                 *
                 * See perf_output_put_handle().
                 */
                tail = ACCESS_ONCE(rb->user_page->data_tail);
                smp_mb();
                offset = head = local_read(&rb->head);
                head += size;
                if (unlikely(!perf_output_space(rb, tail, offset, head)))
                        goto fail;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        if (head - local_read(&rb->wakeup) > rb->watermark)
                local_add(rb->watermark, &rb->wakeup);

        handle->page = offset >> (PAGE_SHIFT + page_order(rb));
        handle->page &= rb->nr_pages - 1;
        handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
        handle->addr = rb->data_pages[handle->page];
        handle->addr += handle->size;
        handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

        if (have_lost) {
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}

void perf_output_copy(struct perf_output_handle *handle,
                      const void *buf, unsigned int len)
{
        __output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}
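
/*
 * A minimal sketch (not built) of how a caller typically strings these
 * together to emit a record; the record layout, type and payload below are
 * made up purely for illustration.
 */
#if 0
static void example_output_record(struct perf_event *event)
{
        struct perf_output_handle handle;
        struct {
                struct perf_event_header header;
                u64                      value;
        } rec;

        rec.header.type = PERF_RECORD_SAMPLE;   /* example record type */
        rec.header.misc = 0;
        rec.header.size = sizeof(rec);
        rec.value       = 42;                   /* made-up payload */

        /* Reserve space; a nonzero return (-ENOSPC) means nothing was written. */
        if (perf_output_begin(&handle, event, rec.header.size))
                return;

        perf_output_put(&handle, rec);          /* copy the record */
        perf_output_end(&handle);               /* publish the head, maybe wake readers */
}
#endif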

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->writable = 1;

        atomic_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
}
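
/*
 * Worked example (assuming 4 KiB pages): an rb with 8 data pages has
 * perf_data_size() == 32768, so with no explicit watermark rb->watermark
 * becomes 16384 and perf_output_begin() advances rb->wakeup, and thus arms
 * a wakeup, roughly once per half buffer's worth of output.
 */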

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}
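
/*
 * The mmap() layout this implies: pgoff 0 is the user_page (the control
 * page, struct perf_event_mmap_page) and pgoff N for 1 <= N <= nr_pages is
 * data_pages[N - 1]; for example, pgoff 3 resolves to data_pages[2].
 */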

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else

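/*
 * Back perf_mmap() with a single, contiguous vmalloc area instead: the
 * control page and all data pages live in one allocation, so data_pages[]
 * has a single entry, nr_pages stays 1, and the real number of data pages
 * is tracked via page_order (see rb_alloc() below).
 */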
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > (1UL << page_order(rb)))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = 1 << page_order(rb);

        base = rb->user_page;
        for (i = 0; i < nr + 1; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        rb->page_order = ilog2(nr_pages);
        rb->nr_pages = 1;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif