1 #include <linux/cpumask.h>
3 #include <linux/init.h>
4 #include <linux/interrupt.h>
5 #include <linux/kernel_stat.h>
6 #include <linux/proc_fs.h>
7 #include <linux/sched.h>
8 #include <linux/seq_file.h>
9 #include <linux/slab.h>
10 #include <linux/time.h>
11 #include <linux/irqnr.h>
12 #include <asm/cputime.h>
13 #include <linux/tick.h>
/*
 * Architectures may provide their own accounting of per-cpu and global
 * interrupt counts and of idle time; fall back to zero when they don't.
 * The visible fragment had unbalanced #ifndef/#endif pairs — rebalanced here.
 */
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif
25 static cputime64_t get_idle_time(int cpu)
27 u64 idle_time = -1ULL;
31 idle_time = get_cpu_idle_time_us(cpu, NULL);
33 if (idle_time == -1ULL) {
34 /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
35 idle = kstat_cpu(cpu).cpustat.idle;
36 idle = cputime64_add(idle, arch_idle_time(cpu));
38 idle = usecs_to_cputime64(idle_time);
43 static cputime64_t get_iowait_time(int cpu)
45 u64 iowait_time = -1ULL;
49 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
51 if (iowait_time == -1ULL)
52 /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
53 iowait = kstat_cpu(cpu).cpustat.iowait;
55 iowait = usecs_to_cputime64(iowait_time);
60 static int show_stat(struct seq_file *p, void *v)
64 cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
65 cputime64_t guest, guest_nice;
68 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
69 struct timespec boottime;
71 user = nice = system = idle = iowait =
72 irq = softirq = steal = cputime64_zero;
73 guest = guest_nice = cputime64_zero;
74 getboottime(&boottime);
75 jif = boottime.tv_sec;
77 for_each_possible_cpu(i) {
78 user = cputime64_add(user, kstat_cpu(i).cpustat.user);
79 nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
80 system = cputime64_add(system, kstat_cpu(i).cpustat.system);
81 idle = cputime64_add(idle, get_idle_time(i));
82 iowait = cputime64_add(iowait, get_iowait_time(i));
83 irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
84 softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
85 steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
86 guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
87 guest_nice = cputime64_add(guest_nice,
88 kstat_cpu(i).cpustat.guest_nice);
89 sum += kstat_cpu_irqs_sum(i);
90 sum += arch_irq_stat_cpu(i);
92 for (j = 0; j < NR_SOFTIRQS; j++) {
93 unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
95 per_softirq_sums[j] += softirq_stat;
96 sum_softirq += softirq_stat;
99 sum += arch_irq_stat();
101 seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu "
103 (unsigned long long)cputime64_to_clock_t(user),
104 (unsigned long long)cputime64_to_clock_t(nice),
105 (unsigned long long)cputime64_to_clock_t(system),
106 (unsigned long long)cputime64_to_clock_t(idle),
107 (unsigned long long)cputime64_to_clock_t(iowait),
108 (unsigned long long)cputime64_to_clock_t(irq),
109 (unsigned long long)cputime64_to_clock_t(softirq),
110 (unsigned long long)cputime64_to_clock_t(steal),
111 (unsigned long long)cputime64_to_clock_t(guest),
112 (unsigned long long)cputime64_to_clock_t(guest_nice));
113 for_each_online_cpu(i) {
114 /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
115 user = kstat_cpu(i).cpustat.user;
116 nice = kstat_cpu(i).cpustat.nice;
117 system = kstat_cpu(i).cpustat.system;
118 idle = get_idle_time(i);
119 iowait = get_iowait_time(i);
120 irq = kstat_cpu(i).cpustat.irq;
121 softirq = kstat_cpu(i).cpustat.softirq;
122 steal = kstat_cpu(i).cpustat.steal;
123 guest = kstat_cpu(i).cpustat.guest;
124 guest_nice = kstat_cpu(i).cpustat.guest_nice;
126 "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
129 (unsigned long long)cputime64_to_clock_t(user),
130 (unsigned long long)cputime64_to_clock_t(nice),
131 (unsigned long long)cputime64_to_clock_t(system),
132 (unsigned long long)cputime64_to_clock_t(idle),
133 (unsigned long long)cputime64_to_clock_t(iowait),
134 (unsigned long long)cputime64_to_clock_t(irq),
135 (unsigned long long)cputime64_to_clock_t(softirq),
136 (unsigned long long)cputime64_to_clock_t(steal),
137 (unsigned long long)cputime64_to_clock_t(guest),
138 (unsigned long long)cputime64_to_clock_t(guest_nice));
140 seq_printf(p, "intr %llu", (unsigned long long)sum);
142 /* sum again ? it could be updated? */
144 seq_printf(p, " %u", kstat_irqs_usr(j));
150 "procs_running %lu\n"
151 "procs_blocked %lu\n",
152 nr_context_switches(),
158 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
160 for (i = 0; i < NR_SOFTIRQS; i++)
161 seq_printf(p, " %u", per_softirq_sums[i]);
167 static int stat_open(struct inode *inode, struct file *file)
169 unsigned size = 4096 * (1 + num_possible_cpus() / 32);
174 /* don't ask for more than the kmalloc() max size */
175 if (size > KMALLOC_MAX_SIZE)
176 size = KMALLOC_MAX_SIZE;
177 buf = kmalloc(size, GFP_KERNEL);
181 res = single_open(file, show_stat, NULL);
183 m = file->private_data;
191 static const struct file_operations proc_stat_operations = {
195 .release = single_release,
198 static int __init proc_stat_init(void)
200 proc_create("stat", 0, NULL, &proc_stat_operations);
203 module_init(proc_stat_init);