/*
 * Licensed under the GPL
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;
static int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, index = %d\n",
		       ret, info->entry_number);

	return ret;
}
/*
 * get_free_idx: get a yet-unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;

	return -ESRCH;
}
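/*
 * Illustrative only (userspace, not part of this file): a caller may pass
 * entry_number == -1 to ask the kernel to pick the slot; get_free_idx() above
 * chooses it, and sys_set_thread_area() reports the choice back. The names
 * tls_block and chosen below are hypothetical:
 *
 *	struct user_desc u = {
 *		.entry_number = -1,		// "pick a free slot for me"
 *		.base_addr = (unsigned long) tls_block,
 *		.limit = 0xfffff,
 *		.seg_32bit = 1,
 *		.limit_in_pages = 1,
 *		.useable = 1,
 *	};
 *	if (syscall(SYS_set_thread_area, &u) == 0)
 *		chosen = u.entry_number;
 */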
static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Check LDT_empty or the i386 sys_get_thread_area code: this indeed
	 * yields an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}
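/*
 * For reference, LDT_empty() (from the x86 descriptor headers) treats a
 * descriptor as empty roughly when:
 *
 *	#define LDT_empty(info) (		\
 *		(info)->base_addr == 0 &&	\
 *		(info)->limit == 0 &&		\
 *		(info)->contents == 0 &&	\
 *		(info)->read_exec_only == 1 &&	\
 *		(info)->seg_32bit == 0 &&	\
 *		(info)->limit_in_pages == 0 &&	\
 *		(info)->seg_not_present == 1 &&	\
 *		(info)->useable == 0)
 *
 * which is why the two fields above must be set to 1 after the memset().
 */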
#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}
/*
 * Check whether we need to do a flush for the new process, i.e. whether there
 * are any descriptors that have not been flushed to the host yet.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present, we may need to clear a descriptor
		 * which had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}

	return ret;
}
/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Still correct to do this, if it wasn't present on the host
		 * it will remain as flushed as it was.
		 */
		curr->flushed = 0;
	}
}
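/*
 * For context, the fork path in copy_thread() (arch/um/kernel/process.c)
 * invokes this roughly as follows, so the first switch_to() after fork
 * reloads everything into the host process:
 *
 *	clear_flushed_tls(p);
 *	if (clone_flags & CLONE_SETTLS)
 *		ret = arch_set_tls(p, tls);
 */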
/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this won't be needed.
 *
 * Nor will this need to be used when (and if) we add support for the host SKAS
 * patch.
 */
int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}
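/*
 * For context, the context-switch hook arch_switch_to() essentially just
 * forwards here and logs any failure; a sketch of the caller:
 *
 *	void arch_switch_to(struct task_struct *to)
 *	{
 *		int err = arch_switch_tls(to);
 *
 *		if (err && err != -EINVAL)
 *			printk(KERN_WARNING
 *			       "arch_switch_tls failed, errno %d\n", -err);
 *	}
 */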
static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
		       "without flushed TLS.\n", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values read back to
	 * userspace from tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}
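/*
 * Illustrative only (tracer side, not part of this file): a debugger reaches
 * the function above via the PTRACE_SET_THREAD_AREA ptrace request, roughly:
 *
 *	struct user_desc u = { .entry_number = idx, ... };
 *	ptrace(PTRACE_SET_THREAD_AREA, traced_pid, idx, &u);
 */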
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}
/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}
/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR "Host TLS support NOT detected! "
		       "TLS support inside UML will not work\n");

	return 0;
}

__initcall(__setup_host_supports_tls);