// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* - Kasper Dupont
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                - Kasper Dupont
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                - Kasper Dupont
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault.
 *                - Kasper Dupont
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
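/*
 * Decoded against the standard EFLAGS bit layout (worth cross-checking
 * with <asm/processor-flags.h>), these masks work out to:
 *
 *   SAFE_MASK   (0xDD5) = CF | PF | AF | ZF | SF | TF | DF | OF,
 *                         i.e. the arithmetic flags plus TF and DF;
 *                         notably *not* IF (bit 9), IOPL or NT.
 *   RETURN_MASK (0xDFF) = SAFE_MASK plus the fixed/reserved low bits
 *                         (1, 3 and 5), still excluding IF.
 *
 * So the vm86 guest can never toggle the real IF or IOPL through these
 * paths; IF is virtualised via VIF in VEFLAGS instead.
 */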
void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!access_ok(user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	preempt_disable();
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	update_task_stack(tsk);
	refresh_sysenter_cs(&tsk->thread);
	vm86->saved_sp0 = 0;
	preempt_enable();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
}
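/*
 * Note: the retval stored above becomes the vm86() return value seen by
 * userspace. By convention it packs a type in the low byte and an
 * argument above it, e.g. VM86_INTx + (i << 8) for "interrupt i was not
 * handled" (see the VM86_TYPE()/VM86_ARG() macros in <uapi/asm/vm86.h>).
 */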
static void mark_screen_rdonly(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	p4d = p4d_offset(pgd, 0xA0000);
	if (p4d_none_or_clear_bad(p4d))
		goto out;
	pud = pud_offset(p4d, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);

	if (pmd_trans_huge(*pmd)) {
		vma = find_vma(mm, 0xA0000);
		split_huge_pmd(vma, pmd, 0xA0000);
	}
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}
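/*
 * The range write-protected above is 0xA0000 + 32 * 4K = 0xA0000-0xBFFFF,
 * i.e. the 128K legacy VGA window. With VM86_SCREEN_BITMAP set, write
 * faults on these pages are how the screen_bitmap gets populated, letting
 * the emulator see which parts of the screen were touched.
 */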
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
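/*
 * Illustrative userspace probe (a sketch, not code from this file):
 *
 *	if (syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) == 0)
 *		... vm86plus is available ...
 *
 * On a vm86plus kernel the call lands in the switch above and returns 0;
 * on a pre-plus kernel the cmd value is taken as a (bogus) vm86_struct
 * pointer and the call fails, which is exactly what the NOTE describes.
 */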
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap. Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0. DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 *
		 * The implementation of security_mmap_addr will allow
		 * suitably privileged users to map va 0 even if
		 * vm.mmap_min_addr is set above 0, and we want this
		 * behavior for vm86 as well, as it ensures that legacy
		 * tools like vbetool will not fail just because of
		 * vm.mmap_min_addr.
		 */
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;
	if (!access_ok(user_vm86, plus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct)))
		return -EFAULT;
	memset(&vm86regs, 0, sizeof(vm86regs));

	get_user_try {
		unsigned short seg;
		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
		get_user_ex(seg, &user_vm86->regs.cs);
		vm86regs.pt.cs = seg;
		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
		get_user_ex(seg, &user_vm86->regs.ss);
		vm86regs.pt.ss = seg;
		get_user_ex(vm86regs.es, &user_vm86->regs.es);
		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

		get_user_ex(vm86->flags, &user_vm86->flags);
		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state
 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	/* make room for real-mode segments */
	preempt_disable();
	tsk->thread.sp0 += 16;
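	/*
	 * The extra 16 bytes correspond to the four real-mode segment
	 * slots (es, ds, fs, gs, each a 2-byte value padded to 4 bytes)
	 * that struct kernel_vm86_regs appends to struct pt_regs (see
	 * <asm/vm86.h>); the CPU pushes them when it leaves v86 mode.
	 */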
	if (boot_cpu_has(X86_FEATURE_SEP)) {
		tsk->thread.sysenter_cs = 0;
		refresh_sysenter_cs(&tsk->thread);
	}

	update_task_stack(tsk);
	preempt_enable();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	return regs->ax;
}
static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */
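/*
 * Worked through against the code below, the sequence from that note:
 * CLI clears VIF; PUSHF pushes flags with IF=0 (get_vflags() copies VIF
 * into IF); STI sets VIF again; POPF must then see IF=0 in the popped
 * value and call clear_IF(), or interrupts would wrongly stay enabled.
 */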
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}
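/*
 * Note that get_vflags() unconditionally ORs in X86_EFLAGS_IOPL (both
 * bits, i.e. IOPL == 3), so a PUSHF inside the guest always observes
 * IOPL=3 regardless of the real IOPL, and the IF it sees is the virtual
 * VIF rather than the true interrupt flag.
 */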
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}
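/*
 * Example of the reflection path above: for INT 10h the vector is the
 * 4-byte seg:off entry at linear address 0x10 << 2 == 0x40 in the
 * guest's real-mode IVT; do_int() pushes the 6-byte FLAGS/CS/IP frame
 * on the guest stack and jumps there, mimicking what the CPU would do.
 */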
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);
	return 0;
}
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);
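	/*
	 * For example, the two-byte guest sequence 66 9c is an
	 * operand-size-prefixed pushf: the loop above consumes the 0x66
	 * and sets data32, so the switch below takes the 32-bit branch
	 * of the pushf case.
	 */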
	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}
	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;
check_vip:
	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;
simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
	| (1 << SIGUNUSED))
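/*
 * Bit 0 of ALLOWED_SIGS permits sig == 0, i.e. "deliver no signal":
 * the task then learns about the IRQ only by polling with
 * VM86_GET_AND_RESET_IRQ (see do_vm86_irq_handling() below).
 */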
static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}
static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}
static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;

	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}
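/*
 * For reference, VM86_REQUEST_IRQ packs its argument as
 * (signal << 8) | irq; e.g. (SIGUSR1 << 8) | 3 asks for IRQ 3 with
 * SIGUSR1 delivered on each interrupt, and the call returns the irq
 * number on success.
 */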