2 * Workqueue statistical tracer.
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
9 #include <trace/events/workqueue.h>
10 #include <linux/list.h>
11 #include <linux/percpu.h>
12 #include "trace_stat.h"
16 /* A cpu workqueue thread */
17 struct cpu_workqueue_stats {
18 struct list_head list;
21 /* Can be inserted from interrupt or user context, need to be atomic */
24 * Don't need to be atomic, works are serialized in a single workqueue thread
27 unsigned int executed;
30 /* List of workqueue threads on one cpu */
31 struct workqueue_global_stats {
32 struct list_head list;
36 /* Don't need a global lock because allocated before the workqueues, and
39 static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
40 #define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
42 /* Insertion of a work */
44 probe_workqueue_insertion(struct task_struct *wq_thread,
45 struct work_struct *work)
47 int cpu = cpumask_first(&wq_thread->cpus_allowed);
48 struct cpu_workqueue_stats *node;
51 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
52 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
53 if (node->pid == wq_thread->pid) {
54 atomic_inc(&node->inserted);
58 pr_debug("trace_workqueue: entry not found\n");
60 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
63 /* Execution of a work */
65 probe_workqueue_execution(struct task_struct *wq_thread,
66 struct work_struct *work)
68 int cpu = cpumask_first(&wq_thread->cpus_allowed);
69 struct cpu_workqueue_stats *node;
72 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
73 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
74 if (node->pid == wq_thread->pid) {
79 pr_debug("trace_workqueue: entry not found\n");
81 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
84 /* Creation of a cpu workqueue thread */
85 static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
87 struct cpu_workqueue_stats *cws;
92 /* Workqueues are sometimes created in atomic context */
93 cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
95 pr_warning("trace_workqueue: not enough memory\n");
98 INIT_LIST_HEAD(&cws->list);
101 cws->pid = wq_thread->pid;
103 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
104 list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
105 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
108 /* Destruction of a cpu workqueue thread */
109 static void probe_workqueue_destruction(struct task_struct *wq_thread)
111 /* Workqueue only execute on one cpu */
112 int cpu = cpumask_first(&wq_thread->cpus_allowed);
113 struct cpu_workqueue_stats *node, *next;
116 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
117 list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
119 if (node->pid == wq_thread->pid) {
120 list_del(&node->list);
126 pr_debug("trace_workqueue: don't find workqueue to destroy\n");
128 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
132 static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
135 struct cpu_workqueue_stats *ret = NULL;
138 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
140 if (!list_empty(&workqueue_cpu_stat(cpu)->list))
141 ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
142 struct cpu_workqueue_stats, list);
144 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
149 static void *workqueue_stat_start(struct tracer_stat *trace)
154 for_each_possible_cpu(cpu) {
155 ret = workqueue_stat_start_cpu(cpu);
162 static void *workqueue_stat_next(void *prev, int idx)
164 struct cpu_workqueue_stats *prev_cws = prev;
165 int cpu = prev_cws->cpu;
169 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
170 if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
171 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
173 cpu = cpumask_next(cpu, cpu_possible_mask);
174 if (cpu >= nr_cpu_ids)
176 } while (!(ret = workqueue_stat_start_cpu(cpu)));
179 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
181 return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
185 static int workqueue_stat_show(struct seq_file *s, void *p)
187 struct cpu_workqueue_stats *cws = p;
191 struct task_struct *tsk;
193 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
194 if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
196 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
198 pid = find_get_pid(cws->pid);
200 tsk = get_pid_task(pid, PIDTYPE_PID);
202 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
203 atomic_read(&cws->inserted), cws->executed,
205 put_task_struct(tsk);
/* Print the column headers for the stat file */
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
	seq_printf(s, "# | | | |\n");
	return 0;
}
220 struct tracer_stat workqueue_stats __read_mostly = {
221 .name = "workqueues",
222 .stat_start = workqueue_stat_start,
223 .stat_next = workqueue_stat_next,
224 .stat_show = workqueue_stat_show,
225 .stat_headers = workqueue_stat_headers
229 int __init stat_workqueue_init(void)
231 if (register_stat_tracer(&workqueue_stats)) {
232 pr_warning("Unable to register workqueue stat tracer\n");
238 fs_initcall(stat_workqueue_init);
241 * Workqueues are created very early, just after pre-smp initcalls.
242 * So we must register our tracepoints at this stage.
244 int __init trace_workqueue_early_init(void)
248 ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
252 ret = register_trace_workqueue_execution(probe_workqueue_execution);
256 ret = register_trace_workqueue_creation(probe_workqueue_creation);
260 ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
264 for_each_possible_cpu(cpu) {
265 spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
266 INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
272 unregister_trace_workqueue_creation(probe_workqueue_creation);
274 unregister_trace_workqueue_execution(probe_workqueue_execution);
276 unregister_trace_workqueue_insertion(probe_workqueue_insertion);
278 pr_warning("trace_workqueue: unable to trace workqueues\n");
282 early_initcall(trace_workqueue_early_init);