x86/tls: Validate TLS entries to protect espfix
[pandora-kernel.git] / arch / x86 / kernel / tls.c
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/sched.h>
4 #include <linux/user.h>
5 #include <linux/regset.h>
6 #include <linux/syscalls.h>
7
8 #include <asm/uaccess.h>
9 #include <asm/desc.h>
10 #include <asm/ldt.h>
11 #include <asm/processor.h>
12 #include <asm/proto.h>
13
14 #include "tls.h"
15
16 /*
17  * sys_alloc_thread_area: get a yet unused TLS descriptor index.
18  */
19 static int get_free_idx(void)
20 {
21         struct thread_struct *t = &current->thread;
22         int idx;
23
24         for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
25                 if (desc_empty(&t->tls_array[idx]))
26                         return idx + GDT_ENTRY_TLS_MIN;
27         return -ESRCH;
28 }
29
30 static bool tls_desc_okay(const struct user_desc *info)
31 {
32         if (LDT_empty(info))
33                 return true;
34
35         /*
36          * espfix is required for 16-bit data segments, but espfix
37          * only works for LDT segments.
38          */
39         if (!info->seg_32bit)
40                 return false;
41
42         return true;
43 }
44
/*
 * Write @n consecutive descriptors from @info into @p's TLS shadow
 * array, starting at GDT slot @idx, and reload the live GDT when @p
 * is the current task.  The caller must have validated @info (see
 * tls_desc_okay()).
 *
 * @p:    task whose tls_array is updated
 * @idx:  first GDT entry number (GDT_ENTRY_TLS_MIN .. GDT_ENTRY_TLS_MAX)
 * @info: array of @n user_desc entries
 * @n:    number of consecutive entries to install
 */
static void set_tls_desc(struct task_struct *p, int idx,
                         const struct user_desc *info, int n)
{
        struct thread_struct *t = &p->thread;
        struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
        int cpu;

        /*
         * We must not get preempted while modifying the TLS.
         */
        cpu = get_cpu();

        while (n-- > 0) {
                if (LDT_empty(info))
                        desc->a = desc->b = 0;  /* clear both descriptor words */
                else
                        fill_ldt(desc, info);
                ++info;
                ++desc;
        }

        /* Only reload the hardware TLS entries if we changed our own. */
        if (t == &current->thread)
                load_TLS(t, cpu);

        put_cpu();
}
71
/*
 * Set a given TLS descriptor:
 *
 * @p:            task whose TLS area is modified
 * @idx:          GDT entry number, or -1 to take it from u_info (and,
 *                if still -1 and @can_allocate, pick a free slot)
 * @u_info:       userspace descriptor to install
 * @can_allocate: nonzero if a free slot may be auto-allocated
 *
 * Returns 0 on success, -EFAULT on a failed userspace access,
 * -EINVAL for an out-of-range index or a descriptor rejected by
 * tls_desc_okay(), or -ESRCH when allocation finds no free slot.
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
        struct user_desc info;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;

        /* Validate before anything is written: protects espfix. */
        if (!tls_desc_okay(&info))
                return -EINVAL;

        if (idx == -1)
                idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
         */
        if (idx == -1 && can_allocate) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                /* Report the allocated slot back to userspace. */
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        set_tls_desc(p, idx, &info, 1);

        return 0;
}
109
/* set_thread_area(2): install a TLS descriptor for the current task,
 * allocating a free slot if u_info->entry_number is -1. */
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
        return do_set_thread_area(current, -1, u_info, 1);
}
114
115
116 /*
117  * Get the current Thread-Local Storage area:
118  */
119
/*
 * Translate a hardware segment descriptor back into the struct
 * user_desc layout userspace works with.  The struct is zeroed
 * first so no stale kernel stack bytes leak into padding/unset
 * fields.
 */
static void fill_user_desc(struct user_desc *info, int idx,
                           const struct desc_struct *desc)

{
        memset(info, 0, sizeof(*info));
        info->entry_number = idx;
        info->base_addr = get_desc_base(desc);
        info->limit = get_desc_limit(desc);
        info->seg_32bit = desc->d;                /* D/B flag */
        info->contents = desc->type >> 2;         /* top two type bits */
        info->read_exec_only = !(desc->type & 2); /* W/R bit, inverted */
        info->limit_in_pages = desc->g;           /* granularity flag */
        info->seg_not_present = !desc->p;         /* present bit, inverted */
        info->useable = desc->avl;                /* AVL bit */
#ifdef CONFIG_X86_64
        info->lm = desc->l;                       /* long-mode (L) bit */
#endif
}
138
139 int do_get_thread_area(struct task_struct *p, int idx,
140                        struct user_desc __user *u_info)
141 {
142         struct user_desc info;
143
144         if (idx == -1 && get_user(idx, &u_info->entry_number))
145                 return -EFAULT;
146
147         if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
148                 return -EINVAL;
149
150         fill_user_desc(&info, idx,
151                        &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
152
153         if (copy_to_user(u_info, &info, sizeof(info)))
154                 return -EFAULT;
155         return 0;
156 }
157
/* get_thread_area(2): read one of the current task's TLS descriptors;
 * the slot is taken from u_info->entry_number. */
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
        return do_get_thread_area(current, -1, u_info);
}
162
163 int regset_tls_active(struct task_struct *target,
164                       const struct user_regset *regset)
165 {
166         struct thread_struct *t = &target->thread;
167         int n = GDT_ENTRY_TLS_ENTRIES;
168         while (n > 0 && desc_empty(&t->tls_array[n - 1]))
169                 --n;
170         return n;
171 }
172
/*
 * regset "get" hook for the TLS area: export @target's TLS
 * descriptors as an array of struct user_desc into either a kernel
 * buffer (@kbuf) or a user buffer (@ubuf).
 *
 * @pos and @count are byte offsets/lengths and must be multiples of
 * sizeof(struct user_desc), with @pos inside the TLS area.  Returns
 * 0 on success, -EINVAL on misaligned or out-of-range @pos, -EFAULT
 * on a failed userspace copy.
 */
int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        const struct desc_struct *tls;

        /* Offset and length must cover whole descriptors, in range. */
        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        /* Convert byte quantities into descriptor counts. */
        pos /= sizeof(struct user_desc);
        count /= sizeof(struct user_desc);

        tls = &target->thread.tls_array[pos];

        if (kbuf) {
                /* Kernel destination: fill the output array directly. */
                struct user_desc *info = kbuf;
                while (count-- > 0)
                        fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
                                       tls++);
        } else {
                /* User destination: bounce each entry through the stack. */
                struct user_desc __user *u_info = ubuf;
                while (count-- > 0) {
                        struct user_desc info;
                        fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
                        if (__copy_to_user(u_info++, &info, sizeof(info)))
                                return -EFAULT;
                }
        }

        return 0;
}
206
/*
 * regset "set" hook for the TLS area: install an array of struct
 * user_desc entries (from @kbuf or @ubuf) into @target's TLS slots.
 *
 * Every entry is checked with tls_desc_okay() before anything is
 * written, so a single bad entry leaves the whole TLS array
 * untouched.  Returns 0 on success, -EINVAL on misaligned/
 * out-of-range @pos or a rejected descriptor, -EFAULT on a failed
 * userspace copy.
 *
 * NOTE(review): @count is not clamped against infobuf here
 * (GDT_ENTRY_TLS_ENTRIES descriptors); presumably the regset core
 * bounds pos+count before calling us — verify against the caller.
 */
int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        const struct user_desc *info;
        int i;

        /* Offset and length must cover whole descriptors, in range. */
        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        if (kbuf)
                info = kbuf;
        else if (__copy_from_user(infobuf, ubuf, count))
                return -EFAULT;
        else
                info = infobuf;

        /* Validate all entries before installing any of them. */
        for (i = 0; i < count / sizeof(struct user_desc); i++)
                if (!tls_desc_okay(info + i))
                        return -EINVAL;

        set_tls_desc(target,
                     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
                     info, count / sizeof(struct user_desc));

        return 0;
}