perf/core: Invert perf_read_group() loops
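In short: the old perf_event_read_group() filled a small on-stack values[5] array and issued a separate copy_to_user() for the leader and again for every sibling; the rewrite below introduces __perf_read_group_add(), which accumulates an entire group into one kzalloc'd buffer of event->read_size, is invoked for the leader and for each inherited child on the leader's child_list while holding the leader's child_mutex, and the result reaches userspace with a single copy_to_user().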
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8b37d95..d8ff715 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3277,50 +3277,71 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 }
 EXPORT_SYMBOL_GPL(perf_event_read_value);
 
-static int perf_event_read_group(struct perf_event *event,
-                                  u64 read_format, char __user *buf)
+static void __perf_read_group_add(struct perf_event *leader,
+                                       u64 read_format, u64 *values)
 {
-       struct perf_event *leader = event->group_leader, *sub;
-       struct perf_event_context *ctx = leader->ctx;
-       int n = 0, size = 0, ret;
+       struct perf_event *sub;
+       int n = 1; /* skip @nr */
        u64 count, enabled, running;
-       u64 values[5];
-
-       lockdep_assert_held(&ctx->mutex);
 
        count = perf_event_read_value(leader, &enabled, &running);
 
-       values[n++] = 1 + leader->nr_siblings;
+       /*
+        * Since we co-schedule groups, {enabled,running} times of siblings
+        * will be identical to those of the leader, so we only publish one
+        * set.
+        */
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                values[n++] = enabled;
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                values[n++] = running;
-       values[n++] = count;
+
+       /*
+        * Write {count,id} tuples for every sibling.
+        */
+       values[n++] += count;
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
 
-       size = n * sizeof(u64);
-
-       if (copy_to_user(buf, values, size))
-               return -EFAULT;
-
-       ret = size;
-
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-               n = 0;
-
                values[n++] = perf_event_read_value(sub, &enabled, &running);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
+       }
+}
 
-               size = n * sizeof(u64);
+static int perf_event_read_group(struct perf_event *event,
+                                  u64 read_format, char __user *buf)
+{
+       struct perf_event *leader = event->group_leader, *child;
+       struct perf_event_context *ctx = leader->ctx;
+       int ret = event->read_size;
+       u64 *values;
 
-               if (copy_to_user(buf + ret, values, size)) {
-                       return -EFAULT;
-               }
+       lockdep_assert_held(&ctx->mutex);
 
-               ret += size;
-       }
+       values = kzalloc(event->read_size, GFP_KERNEL);
+       if (!values)
+               return -ENOMEM;
+
+       values[0] = 1 + leader->nr_siblings;
+
+       /*
+        * By locking the child_mutex of the leader we effectively
+        * lock the child list of all siblings.. XXX explain how.
+        */
+       mutex_lock(&leader->child_mutex);
+
+       __perf_read_group_add(leader, read_format, values);
+       list_for_each_entry(child, &leader->child_list, child_list)
+               __perf_read_group_add(child, read_format, values);
+
+       mutex_unlock(&leader->child_mutex);
+
+       if (copy_to_user(buf, values, event->read_size))
+               ret = -EFAULT;
+
+       kfree(values);
 
        return ret;
 }
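
For reference, a minimal userspace sketch, not part of the patch, showing how a reader would parse the buffer that the rewritten perf_event_read_group() fills with one copy_to_user(); the two hardware events, the buffer size, and the error handling are illustrative assumptions only:

/*
 * Read a two-event group and parse the PERF_FORMAT_GROUP layout:
 *
 *   { nr, time_enabled, time_running, { value, id } * nr }
 *
 * enabled/running appear once per group, matching the kernel comment
 * that sibling times are identical to the leader's.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t buf[32], nr, i, n = 0;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
			   PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	leader = perf_event_open(&attr, 0, -1, -1, 0);	/* group leader */
	if (leader < 0)
		return 1;

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;
	sibling = perf_event_open(&attr, 0, -1, leader, 0); /* joins the group */
	if (sibling < 0)
		return 1;

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	/* one read returns the whole group */
	if (read(leader, buf, sizeof(buf)) < 0)
		return 1;

	nr = buf[n++];				/* 1 + nr_siblings */
	printf("enabled=%llu running=%llu\n",	/* one set per group */
	       (unsigned long long)buf[n],
	       (unsigned long long)buf[n + 1]);
	n += 2;

	for (i = 0; i < nr; i++) {		/* {value, id} tuple per event */
		uint64_t value = buf[n++];
		uint64_t id = buf[n++];

		printf("id=%llu value=%llu\n",
		       (unsigned long long)id, (unsigned long long)value);
	}

	close(sibling);
	close(leader);
	return 0;
}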