/*
 * linux/kernel/compat.c
 *
 * Kernel compatibility routines for e.g. 32 bit syscall support
 * on 64 bit kernels.
 *
 * Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
                        __get_user(ts->tv_sec, &cts->tv_sec) ||
                        __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
                        __put_user(ts->tv_sec, &cts->tv_sec) ||
                        __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

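/*
 * Restart handler for compat_sys_nanosleep().  restart->arg0 holds the
 * absolute expiry time in jiffies and restart->arg1 the user rmtp
 * pointer (possibly NULL) to report the remaining time into.
 */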
static long compat_nanosleep_restart(struct restart_block *restart)
{
        unsigned long expire = restart->arg0, now = jiffies;
        struct compat_timespec __user *rmtp;

        /* Did it expire while we handled signals? */
        if (!time_after(expire, now))
                return 0;

        expire = schedule_timeout_interruptible(expire - now);
        if (expire == 0)
                return 0;

        rmtp = (struct compat_timespec __user *)restart->arg1;
        if (rmtp) {
                struct compat_timespec ct;
                struct timespec t;

                jiffies_to_timespec(expire, &t);
                ct.tv_sec = t.tv_sec;
                ct.tv_nsec = t.tv_nsec;
                if (copy_to_user(rmtp, &ct, sizeof(ct)))
                        return -EFAULT;
        }
        /* The 'restart' block is already filled in */
        return -ERESTART_RESTARTBLOCK;
}

asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
                struct compat_timespec __user *rmtp)
{
        struct timespec t;
        struct restart_block *restart;
        unsigned long expire;

        if (get_compat_timespec(&t, rqtp))
                return -EFAULT;

        if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
                return -EINVAL;

        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
        expire = schedule_timeout_interruptible(expire);
        if (expire == 0)
                return 0;

        if (rmtp) {
                jiffies_to_timespec(expire, &t);
                if (put_compat_timespec(&t, rmtp))
                        return -EFAULT;
        }
        restart = &current_thread_info()->restart_block;
        restart->fn = compat_nanosleep_restart;
        restart->arg0 = jiffies + expire;
        restart->arg1 = (unsigned long) rmtp;
        return -ERESTART_RESTARTBLOCK;
}

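/*
 * itimerval conversion helpers.  The field copies are combined with
 * bitwise OR (not ||) so every __get_user()/__put_user() is attempted
 * once access_ok() has succeeded; any fault makes the result non-zero.
 */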
static inline long get_compat_itimerval(struct itimerval *o,
                struct compat_itimerval __user *i)
{
        return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
                (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
                 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
                 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
                 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
                struct itimerval *i)
{
        return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
                (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
                 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
                 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
                 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
                struct compat_itimerval __user *it)
{
        struct itimerval kit;
        int error;

        error = do_getitimer(which, &kit);
        if (!error && put_compat_itimerval(it, &kit))
                error = -EFAULT;
        return error;
}

asmlinkage long compat_sys_setitimer(int which,
                struct compat_itimerval __user *in,
                struct compat_itimerval __user *out)
{
        struct itimerval kin, kout;
        int error;

        if (in) {
                if (get_compat_itimerval(&kin, in))
                        return -EFAULT;
        } else
                memset(&kin, 0, sizeof(kin));

        error = do_setitimer(which, &kin, out ? &kout : NULL);
        if (error || !out)
                return error;
        if (put_compat_itimerval(out, &kout))
                return -EFAULT;
        return 0;
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
        /*
         * In the SMP world we might just be unlucky and have one of
         * the times increment as we use it. Since the value is an
         * atomically safe type this is just fine. Conceptually it's
         * as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                struct compat_tms tmp;
                struct task_struct *tsk = current;
                struct task_struct *t;
                cputime_t utime, stime, cutime, cstime;

                read_lock(&tasklist_lock);
                utime = tsk->signal->utime;
                stime = tsk->signal->stime;
                t = tsk;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        t = next_thread(t);
                } while (t != tsk);

                /*
                 * While we have tasklist_lock read-locked, no dying thread
                 * can be updating current->signal->[us]time.  Instead,
                 * we got their counts included in the live thread loop.
                 * However, another thread can come in right now and
                 * do a wait call that updates current->signal->c[us]time.
                 * To make sure we always see that pair updated atomically,
                 * we take the siglock around fetching them.
                 */
                spin_lock_irq(&tsk->sighand->siglock);
                cutime = tsk->signal->cutime;
                cstime = tsk->signal->cstime;
                spin_unlock_irq(&tsk->sighand->siglock);
                read_unlock(&tasklist_lock);

                tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
                tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
                tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
                tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
                if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
                        return -EFAULT;
        }
        return compat_jiffies_to_clock_t(jiffies);
}

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

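/*
 * Most of the wrappers below share one pattern: copy the 32-bit user
 * structure into a kernel buffer, temporarily widen the address limit
 * with set_fs(KERNEL_DS) so the native syscall accepts a kernel
 * pointer, then convert the result back into the compat layout.
 */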
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
        old_sigset_t s;
        long ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sigpending((old_sigset_t __user *) &s);
        set_fs(old_fs);
        if (ret == 0)
                ret = put_user(s, set);
        return ret;
}

asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
                compat_old_sigset_t __user *oset)
{
        old_sigset_t s;
        long ret;
        mm_segment_t old_fs;

        if (set && get_user(s, set))
                return -EFAULT;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        ret = sys_sigprocmask(how,
                              set ? (old_sigset_t __user *) &s : NULL,
                              oset ? (old_sigset_t __user *) &s : NULL);
        set_fs(old_fs);
        if (ret == 0)
                if (oset)
                        ret = put_user(s, oset);
        return ret;
}

asmlinkage long compat_sys_setrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
            __get_user(r.rlim_cur, &rlim->rlim_cur) ||
            __get_user(r.rlim_max, &rlim->rlim_max))
                return -EFAULT;

        if (r.rlim_cur == COMPAT_RLIM_INFINITY)
                r.rlim_cur = RLIM_INFINITY;
        if (r.rlim_max == COMPAT_RLIM_INFINITY)
                r.rlim_max = RLIM_INFINITY;
        set_fs(KERNEL_DS);
        ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
        set_fs(old_fs);
        return ret;
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_old_getrlimit(resource, &r);
        set_fs(old_fs);

        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
        set_fs(old_fs);
        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
        if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
            __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
            __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
            __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
            __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
            __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
            __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
            __put_user(r->ru_idrss, &ru->ru_idrss) ||
            __put_user(r->ru_isrss, &ru->ru_isrss) ||
            __put_user(r->ru_minflt, &ru->ru_minflt) ||
            __put_user(r->ru_majflt, &ru->ru_majflt) ||
            __put_user(r->ru_nswap, &ru->ru_nswap) ||
            __put_user(r->ru_inblock, &ru->ru_inblock) ||
            __put_user(r->ru_oublock, &ru->ru_oublock) ||
            __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
            __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
            __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
            __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
            __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
                return -EFAULT;
        return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
        struct rusage r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getrusage(who, (struct rusage __user *) &r);
        set_fs(old_fs);

        if (ret)
                return ret;

        if (put_compat_rusage(&r, ru))
                return -EFAULT;

        return 0;
}

asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
        struct compat_rusage __user *ru)
{
        if (!ru) {
                return sys_wait4(pid, stat_addr, options, NULL);
        } else {
                struct rusage r;
                int ret;
                unsigned int status;
                mm_segment_t old_fs = get_fs();

                set_fs(KERNEL_DS);
                ret = sys_wait4(pid,
                                (stat_addr ?
                                 (unsigned int __user *) &status : NULL),
                                options, (struct rusage __user *) &r);
                set_fs(old_fs);

                if (ret > 0) {
                        if (put_compat_rusage(&r, ru))
                                return -EFAULT;
                        if (stat_addr && put_user(status, stat_addr))
                                return -EFAULT;
                }
                return ret;
        }
}

asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
                struct compat_siginfo __user *uinfo, int options,
                struct compat_rusage __user *uru)
{
        siginfo_t info;
        struct rusage ru;
        long ret;
        mm_segment_t old_fs = get_fs();

        memset(&info, 0, sizeof(info));

        set_fs(KERNEL_DS);
        ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
                         uru ? (struct rusage __user *)&ru : NULL);
        set_fs(old_fs);

        if ((ret < 0) || (info.si_signo == 0))
                return ret;

        if (uru) {
                ret = put_compat_rusage(&ru, uru);
                if (ret)
                        return ret;
        }

        BUG_ON(info.si_code & __SI_MASK);
        info.si_code |= __SI_CHLD;
        return copy_siginfo_to_user32(uinfo, &info);
}

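/*
 * Build a kernel cpumask_t from a user-supplied compat bitmap of 'len'
 * bytes: zero-fill when the user mask is shorter than cpumask_t and
 * truncate when it is longer, then pull the bits in via
 * compat_get_bitmap().
 */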
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
                unsigned len, cpumask_t *new_mask)
{
        unsigned long *k;

        if (len < sizeof(cpumask_t))
                memset(new_mask, 0, sizeof(cpumask_t));
        else if (len > sizeof(cpumask_t))
                len = sizeof(cpumask_t);

        k = cpus_addr(*new_mask);
        return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
                unsigned int len,
                compat_ulong_t __user *user_mask_ptr)
{
        cpumask_t new_mask;
        int retval;

        retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
        if (retval)
                return retval;

        return sched_setaffinity(pid, new_mask);
}

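/*
 * On success the return value is the number of bytes written to the
 * user mask (min_length), which shrinks to a single compat word when
 * NR_CPUS fits in BITS_PER_COMPAT_LONG; shorter user buffers are
 * rejected with -EINVAL.
 */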
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
                compat_ulong_t __user *user_mask_ptr)
{
        int ret;
        cpumask_t mask;
        unsigned long *k;
        unsigned int min_length = sizeof(cpumask_t);

        if (NR_CPUS <= BITS_PER_COMPAT_LONG)
                min_length = sizeof(compat_ulong_t);

        if (len < min_length)
                return -EINVAL;

        ret = sched_getaffinity(pid, &mask);
        if (ret < 0)
                return ret;

        k = cpus_addr(mask);
        ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
        if (ret)
                return ret;

        return min_length;
}

static int get_compat_itimerspec(struct itimerspec *dst,
                struct compat_itimerspec __user *src)
{
        if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
            get_compat_timespec(&dst->it_value, &src->it_value))
                return -EFAULT;
        return 0;
}

static int put_compat_itimerspec(struct compat_itimerspec __user *dst,
                struct itimerspec *src)
{
        if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
            put_compat_timespec(&src->it_value, &dst->it_value))
                return -EFAULT;
        return 0;
}

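/*
 * sys_timer_create() wants a native struct sigevent in user memory, so
 * convert the compat sigevent into a buffer obtained from
 * compat_alloc_user_space() and hand that to the native syscall.
 */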
long compat_sys_timer_create(clockid_t which_clock,
                struct compat_sigevent __user *timer_event_spec,
                timer_t __user *created_timer_id)
{
        struct sigevent __user *event = NULL;

        if (timer_event_spec) {
                struct sigevent kevent;

                event = compat_alloc_user_space(sizeof(*event));
                if (get_compat_sigevent(&kevent, timer_event_spec) ||
                    copy_to_user(event, &kevent, sizeof(*event)))
                        return -EFAULT;
        }

        return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
                struct compat_itimerspec __user *new,
                struct compat_itimerspec __user *old)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec newts, oldts;

        if (!new)
                return -EINVAL;
        if (get_compat_itimerspec(&newts, new))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_settime(timer_id, flags,
                                (struct itimerspec __user *) &newts,
                                (struct itimerspec __user *) &oldts);
        set_fs(oldfs);
        if (!err && old && put_compat_itimerspec(old, &oldts))
                return -EFAULT;
        return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
                struct compat_itimerspec __user *setting)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_gettime(timer_id,
                                (struct itimerspec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_itimerspec(setting, &ts))
                return -EFAULT;
        return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        if (get_compat_timespec(&ts, tp))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_settime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_gettime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

long compat_sys_clock_getres(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_getres(which_clock,
                               (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && tp && put_compat_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
                struct compat_timespec __user *rqtp,
                struct compat_timespec __user *rmtp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec in, out;

        if (get_compat_timespec(&in, rqtp))
                return -EFAULT;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_nanosleep(which_clock, flags,
                                  (struct timespec __user *) &in,
                                  (struct timespec __user *) &out);
        set_fs(oldfs);
        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            put_compat_timespec(&out, rmtp))
                return -EFAULT;
        return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
                const struct compat_sigevent __user *u_event)
{
        memset(event, 0, sizeof(*event));
        return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
                __get_user(event->sigev_value.sival_int,
                           &u_event->sigev_value.sival_int) ||
                __get_user(event->sigev_signo, &u_event->sigev_signo) ||
                __get_user(event->sigev_notify, &u_event->sigev_notify) ||
                __get_user(event->sigev_notify_thread_id,
                           &u_event->sigev_notify_thread_id))
                ? -EFAULT : 0;
}

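/*
 * Copy a bitmap from the compat (32-bit word) user layout into native
 * unsigned longs: each native word is assembled from
 * sizeof(long)/sizeof(compat_ulong_t) compat words, least significant
 * first, and anything past the end of the user bitmap reads as zero.
 */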
long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = 0;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        /*
                         * We don't want to read past the end of the userspace
                         * bitmap.  We must however ensure the end of the
                         * kernel bitmap is zeroed.
                         */
                        if (nr_compat_longs-- > 0) {
                                if (__get_user(um, umask))
                                        return -EFAULT;
                        } else {
                                um = 0;
                        }

                        umask++;
                        m |= (long)um << (j * BITS_PER_COMPAT_LONG);
                }
                *mask++ = m;
        }

        return 0;
}

long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = *mask++;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        um = m;

                        /*
                         * We don't want to write past the end of the
                         * userspace bitmap.
                         */
                        if (nr_compat_longs-- > 0) {
                                if (__put_user(um, umask))
                                        return -EFAULT;
                        }

                        umask++;
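                        /*
                         * Advance to the next compat word.  The shift is
                         * done in two halves so the shift count never
                         * reaches the full width of 'm', which would be
                         * undefined if 'm' and 'um' were the same size.
                         */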
                        m >>= 4*sizeof(um);
                        m >>= 4*sizeof(um);
                }
        }

        return 0;
}

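/*
 * Expand a compat (32-bit word) sigset into the native one.  The switch
 * cases fall through deliberately: each 64-bit sigset word is built
 * from two consecutive 32-bit compat words.
 */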
void
sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
{
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
        case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
        case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
        case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
        }
}

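/*
 * Compat version of rt_sigtimedwait(): try to dequeue a signal from the
 * requested set, and if none is pending, sleep for at most the given
 * timeout with the requested signals temporarily unblocked so they can
 * wake us; the result is reported through a compat siginfo.
 */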
asmlinkage long
compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
                struct compat_siginfo __user *uinfo,
                struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
        compat_sigset_t s32;
        sigset_t s;
        int sig;
        struct timespec t;
        siginfo_t info;
        long ret, timeout = 0;

        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&s, &s32);
        sigdelsetmask(&s, sigmask(SIGKILL) | sigmask(SIGSTOP));
        signotset(&s);

        if (uts) {
                if (get_compat_timespec(&t, uts))
                        return -EFAULT;
                if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0
                    || t.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sighand->siglock);
        sig = dequeue_signal(current, &s, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = timespec_to_jiffies(&t)
                                + (t.tv_sec || t.tv_nsec);
                if (timeout) {
                        current->real_blocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &s);

                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);

                        timeout = schedule_timeout_interruptible(timeout);

                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &s, &info);
                        current->blocked = current->real_blocked;
                        siginitset(&current->real_blocked, 0);
                        recalc_sigpending();
                }
        }
        spin_unlock_irq(&current->sighand->siglock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user32(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = timeout ? -EINTR : -EAGAIN;
        }
        return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user *tloc)
{
        compat_time_t i;
        struct timeval tv;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i, tloc))
                        i = -EFAULT;
        }
        return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
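/*
 * Compat rt_sigsuspend(): install the given mask (minus SIGKILL/SIGSTOP)
 * and sleep until a signal arrives.  The old mask is saved in
 * ->saved_sigmask and TIF_RESTORE_SIGMASK is set so it gets restored
 * when the signal is delivered.
 */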
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
        sigset_t newset;
        compat_sigset_t newset32;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&newset, &newset32);
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

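/*
 * struct timex differs between 32-bit and 64-bit ABIs (longs and the
 * embedded struct timeval change size), so convert it field by field in
 * both directions around do_adjtimex().
 */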
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
        struct timex txc;
        int ret;

        memset(&txc, 0, sizeof(struct timex));

        if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
            __get_user(txc.modes, &utp->modes) ||
            __get_user(txc.offset, &utp->offset) ||
            __get_user(txc.freq, &utp->freq) ||
            __get_user(txc.maxerror, &utp->maxerror) ||
            __get_user(txc.esterror, &utp->esterror) ||
            __get_user(txc.status, &utp->status) ||
            __get_user(txc.constant, &utp->constant) ||
            __get_user(txc.precision, &utp->precision) ||
            __get_user(txc.tolerance, &utp->tolerance) ||
            __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
            __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
            __get_user(txc.tick, &utp->tick) ||
            __get_user(txc.ppsfreq, &utp->ppsfreq) ||
            __get_user(txc.jitter, &utp->jitter) ||
            __get_user(txc.shift, &utp->shift) ||
            __get_user(txc.stabil, &utp->stabil) ||
            __get_user(txc.jitcnt, &utp->jitcnt) ||
            __get_user(txc.calcnt, &utp->calcnt) ||
            __get_user(txc.errcnt, &utp->errcnt) ||
            __get_user(txc.stbcnt, &utp->stbcnt))
                return -EFAULT;

        ret = do_adjtimex(&txc);

        if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
            __put_user(txc.modes, &utp->modes) ||
            __put_user(txc.offset, &utp->offset) ||
            __put_user(txc.freq, &utp->freq) ||
            __put_user(txc.maxerror, &utp->maxerror) ||
            __put_user(txc.esterror, &utp->esterror) ||
            __put_user(txc.status, &utp->status) ||
            __put_user(txc.constant, &utp->constant) ||
            __put_user(txc.precision, &utp->precision) ||
            __put_user(txc.tolerance, &utp->tolerance) ||
            __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
            __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
            __put_user(txc.tick, &utp->tick) ||
            __put_user(txc.ppsfreq, &utp->ppsfreq) ||
            __put_user(txc.jitter, &utp->jitter) ||
            __put_user(txc.shift, &utp->shift) ||
            __put_user(txc.stabil, &utp->stabil) ||
            __put_user(txc.jitcnt, &utp->jitcnt) ||
            __put_user(txc.calcnt, &utp->calcnt) ||
            __put_user(txc.errcnt, &utp->errcnt) ||
            __put_user(txc.stbcnt, &utp->stbcnt))
                ret = -EFAULT;

        return ret;
}

#ifdef CONFIG_NUMA
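/*
 * sys_move_pages() expects an array of native page pointers; expand the
 * 32-bit pointer array into a buffer from compat_alloc_user_space()
 * before calling it.
 */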
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
                compat_uptr_t __user *pages32,
                const int __user *nodes,
                int __user *status,
                int flags)
{
        const void __user * __user *pages;
        int i;

        pages = compat_alloc_user_space(nr_pages * sizeof(void *));
        for (i = 0; i < nr_pages; i++) {
                compat_uptr_t p;

                if (get_user(p, pages32 + i) ||
                    put_user(compat_ptr(p), pages + i))
                        return -EFAULT;
        }
        return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
#endif
952 | #endif |