Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[pandora-kernel.git] / arch / x86 / vdso / vclock_gettime.c
1 /*
2  * Copyright 2006 Andi Kleen, SUSE Labs.
3  * Subject to the GNU Public License, v.2
4  *
5  * Fast user context implementation of clock_gettime, gettimeofday, and time.
6  *
7  * The code should have no internal unresolved relocations.
8  * Check with readelf after changing.
9  * Also alternative() doesn't work.
10  */
11
12 /* Disable profiling for userspace code: */
13 #define DISABLE_BRANCH_PROFILING
14
15 #include <linux/kernel.h>
16 #include <linux/posix-timers.h>
17 #include <linux/time.h>
18 #include <linux/string.h>
19 #include <asm/vsyscall.h>
20 #include <asm/vgtod.h>
21 #include <asm/timex.h>
22 #include <asm/hpet.h>
23 #include <asm/unistd.h>
24 #include <asm/io.h>
25
26 #define gtod (&VVAR(vsyscall_gtod_data))
27
28 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
29 {
30         long ret;
31         asm("syscall" : "=a" (ret) :
32             "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
33         return ret;
34 }
35
36 notrace static inline long vgetns(void)
37 {
38         long v;
39         cycles_t (*vread)(void);
40         vread = gtod->clock.vread;
41         v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
42         return (v * gtod->clock.mult) >> gtod->clock.shift;
43 }
44
45 notrace static noinline int do_realtime(struct timespec *ts)
46 {
47         unsigned long seq, ns;
48         do {
49                 seq = read_seqbegin(&gtod->lock);
50                 ts->tv_sec = gtod->wall_time_sec;
51                 ts->tv_nsec = gtod->wall_time_nsec;
52                 ns = vgetns();
53         } while (unlikely(read_seqretry(&gtod->lock, seq)));
54         timespec_add_ns(ts, ns);
55         return 0;
56 }
57
58 notrace static noinline int do_monotonic(struct timespec *ts)
59 {
60         unsigned long seq, ns, secs;
61         do {
62                 seq = read_seqbegin(&gtod->lock);
63                 secs = gtod->wall_time_sec;
64                 ns = gtod->wall_time_nsec + vgetns();
65                 secs += gtod->wall_to_monotonic.tv_sec;
66                 ns += gtod->wall_to_monotonic.tv_nsec;
67         } while (unlikely(read_seqretry(&gtod->lock, seq)));
68
69         /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
70          * are all guaranteed to be nonnegative.
71          */
72         while (ns >= NSEC_PER_SEC) {
73                 ns -= NSEC_PER_SEC;
74                 ++secs;
75         }
76         ts->tv_sec = secs;
77         ts->tv_nsec = ns;
78
79         return 0;
80 }
81
82 notrace static noinline int do_realtime_coarse(struct timespec *ts)
83 {
84         unsigned long seq;
85         do {
86                 seq = read_seqbegin(&gtod->lock);
87                 ts->tv_sec = gtod->wall_time_coarse.tv_sec;
88                 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
89         } while (unlikely(read_seqretry(&gtod->lock, seq)));
90         return 0;
91 }
92
93 notrace static noinline int do_monotonic_coarse(struct timespec *ts)
94 {
95         unsigned long seq, ns, secs;
96         do {
97                 seq = read_seqbegin(&gtod->lock);
98                 secs = gtod->wall_time_coarse.tv_sec;
99                 ns = gtod->wall_time_coarse.tv_nsec;
100                 secs += gtod->wall_to_monotonic.tv_sec;
101                 ns += gtod->wall_to_monotonic.tv_nsec;
102         } while (unlikely(read_seqretry(&gtod->lock, seq)));
103
104         /* wall_time_nsec and wall_to_monotonic.tv_nsec are
105          * guaranteed to be between 0 and NSEC_PER_SEC.
106          */
107         if (ns >= NSEC_PER_SEC) {
108                 ns -= NSEC_PER_SEC;
109                 ++secs;
110         }
111         ts->tv_sec = secs;
112         ts->tv_nsec = ns;
113
114         return 0;
115 }
116
117 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
118 {
119         if (likely(gtod->sysctl_enabled))
120                 switch (clock) {
121                 case CLOCK_REALTIME:
122                         if (likely(gtod->clock.vread))
123                                 return do_realtime(ts);
124                         break;
125                 case CLOCK_MONOTONIC:
126                         if (likely(gtod->clock.vread))
127                                 return do_monotonic(ts);
128                         break;
129                 case CLOCK_REALTIME_COARSE:
130                         return do_realtime_coarse(ts);
131                 case CLOCK_MONOTONIC_COARSE:
132                         return do_monotonic_coarse(ts);
133                 }
134         return vdso_fallback_gettime(clock, ts);
135 }
136 int clock_gettime(clockid_t, struct timespec *)
137         __attribute__((weak, alias("__vdso_clock_gettime")));
138
139 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
140 {
141         long ret;
142         if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
143                 if (likely(tv != NULL)) {
144                         BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
145                                      offsetof(struct timespec, tv_nsec) ||
146                                      sizeof(*tv) != sizeof(struct timespec));
147                         do_realtime((struct timespec *)tv);
148                         tv->tv_usec /= 1000;
149                 }
150                 if (unlikely(tz != NULL)) {
151                         /* Avoid memcpy. Some old compilers fail to inline it */
152                         tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
153                         tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
154                 }
155                 return 0;
156         }
157         asm("syscall" : "=a" (ret) :
158             "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
159         return ret;
160 }
161 int gettimeofday(struct timeval *, struct timezone *)
162         __attribute__((weak, alias("__vdso_gettimeofday")));
163
164 /* This will break when the xtime seconds get inaccurate, but that is
165  * unlikely */
166
/*
 * Issue the real time(2) system call.  Used as the fallback when the
 * vsyscall data is disabled.  Returns the seconds value from the
 * kernel; also stores it through t when t is non-NULL (done by the
 * kernel, not here).
 */
static __always_inline long time_syscall(long *t)
{
	long secs;
	/* syscall destroys rcx, r11 and the flags; all declared here. */
	asm volatile("syscall"
		     : "=a" (secs)
		     : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
	return secs;
}
175
176 notrace time_t __vdso_time(time_t *t)
177 {
178         time_t result;
179
180         if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
181                 return time_syscall(t);
182
183         /* This is atomic on x86_64 so we don't need any locks. */
184         result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
185
186         if (t)
187                 *t = result;
188         return result;
189 }
190 int time(time_t *t)
191         __attribute__((weak, alias("__vdso_time")));