Git Repo - linux.git/commitdiff
Merge branch 'x86/alternatives' into x86/core
author Ingo Molnar <[email protected]>
Fri, 5 Sep 2008 15:03:17 +0000 (17:03 +0200)
committer Ingo Molnar <[email protected]>
Fri, 5 Sep 2008 15:03:17 +0000 (17:03 +0200)
arch/x86/kernel/alternative.c
include/asm-x86/futex.h
include/asm-x86/spinlock.h

diff --combined arch/x86/kernel/alternative.c
index 65a0c1b4869636122fc22dec84de26788662a2d4,7ead11f3732d789e90fb9f2e1990235c4e19a794..fb04e49776ba308b2cd8f86315bcb7692122ce80
@@@ -145,25 -145,35 +145,25 @@@ static const unsigned char *const p6_no
  extern char __vsyscall_0;
  const unsigned char *const *find_nop_table(void)
  {
 -      return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 -             boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
 +      if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 +          boot_cpu_has(X86_FEATURE_NOPL))
 +              return p6_nops;
 +      else
 +              return k8_nops;
  }
  
  #else /* CONFIG_X86_64 */
  
 -static const struct nop {
 -      int cpuid;
 -      const unsigned char *const *noptable;
 -} noptypes[] = {
 -      { X86_FEATURE_K8, k8_nops },
 -      { X86_FEATURE_K7, k7_nops },
 -      { X86_FEATURE_P4, p6_nops },
 -      { X86_FEATURE_P3, p6_nops },
 -      { -1, NULL }
 -};
 -
  const unsigned char *const *find_nop_table(void)
  {
 -      const unsigned char *const *noptable = intel_nops;
 -      int i;
 -
 -      for (i = 0; noptypes[i].cpuid >= 0; i++) {
 -              if (boot_cpu_has(noptypes[i].cpuid)) {
 -                      noptable = noptypes[i].noptable;
 -                      break;
 -              }
 -      }
 -      return noptable;
 +      if (boot_cpu_has(X86_FEATURE_K8))
 +              return k8_nops;
 +      else if (boot_cpu_has(X86_FEATURE_K7))
 +              return k7_nops;
 +      else if (boot_cpu_has(X86_FEATURE_NOPL))
 +              return p6_nops;
 +      else
 +              return intel_nops;
  }
  
  #endif /* CONFIG_X86_64 */
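Both versions of find_nop_table() return a table indexed by NOP length; add_nops() elsewhere in this file walks that table to pad a region with the largest NOPs available. A minimal user-space sketch of that mechanism, with the table trimmed to three entries (the kernel's ASM_NOP_MAX is larger) and the encodings quoted from memory, so treat it as illustrative:

#include <stdio.h>
#include <string.h>

#define NOP_MAX 3  /* trimmed for the sketch; the kernel's ASM_NOP_MAX is larger */

/* entry [n] is an n-byte NOP; nop3 is from the 0f 1f (NOPL) family that
 * the X86_FEATURE_NOPL test above guards */
static const unsigned char nop1[] = { 0x90 };             /* nop */
static const unsigned char nop2[] = { 0x66, 0x90 };       /* osp nop */
static const unsigned char nop3[] = { 0x0f, 0x1f, 0x00 }; /* nopl (%eax) */
static const unsigned char *const p6_nops[] = { NULL, nop1, nop2, nop3 };

/* mirrors the kernel's add_nops(): fill len bytes with the largest NOPs */
static void add_nops(unsigned char *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len > NOP_MAX ? NOP_MAX : len;
		memcpy(insns, p6_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

int main(void)
{
	unsigned char buf[5];
	unsigned int i;

	add_nops(buf, sizeof(buf));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);  /* prints: 0f 1f 00 66 90 */
	printf("\n");
	return 0;
}

The point of switching to the feature test is that the 0f 1f opcode is only guaranteed to decode as a NOP on CPUs advertising X86_FEATURE_NOPL; everything else falls back to the single-byte-prefix k8/k7/intel tables.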
@@@ -231,25 -241,25 +231,25 @@@ static void alternatives_smp_lock(u8 **
                        continue;
                if (*ptr > text_end)
                        continue;
-               text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
+               /* turn DS segment override prefix into lock prefix */
+               text_poke(*ptr, ((unsigned char []){0xf0}), 1);
        };
  }
  
  static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
  {
        u8 **ptr;
-       char insn[1];
  
        if (noreplace_smp)
                return;
  
-       add_nops(insn, 1);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
-               text_poke(*ptr, insn, 1);
+               /* turn lock prefix into DS segment override prefix */
+               text_poke(*ptr, ((unsigned char []){0x3E}), 1);
        };
  }
  
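The two functions above can flip between locked and unlocked forms by rewriting a single byte because 0xf0 (LOCK) and 0x3e (the DS segment override) are both one-byte prefixes, and a DS override is a no-op on these operands, so instruction lengths never change and no code has to move. A standalone sketch of the idea, patching an ordinary buffer rather than live kernel text (poke() is a hypothetical stand-in for text_poke()):

#include <stdio.h>

/* hypothetical stand-in for text_poke(); here the "text" is just a buffer */
static void poke(unsigned char *addr, unsigned char byte)
{
	*addr = byte;
}

int main(void)
{
	/* f0 ff 45 fc = lock incl -0x4(%rbp); swapping the first byte for
	 * 0x3e gives ds incl -0x4(%rbp): same length, no longer atomic */
	unsigned char insn[] = { 0xf0, 0xff, 0x45, 0xfc };

	poke(insn, 0x3e);  /* booting UP: drop the bus lock */
	printf("%02x ff 45 fc\n", insn[0]);
	poke(insn, 0xf0);  /* going SMP again: restore atomicity */
	printf("%02x ff 45 fc\n", insn[0]);
	return 0;
}

The removed code NOP-padded the prefix away instead; poking 0x3e keeps the byte a prefix of the same instruction, which is friendlier to disassembly and to re-locking later.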
diff --combined include/asm-x86/futex.h
index 45dc24d841865eb3d3e2b3ee374f5c7c50bb5719,d1b988ce080a651822ced57c11eba4c7b27bb9d7..06b924ef6fa5b928dc6a6d83889ee6ce98cc157c
@@@ -1,5 -1,5 +1,5 @@@
 -#ifndef _ASM_X86_FUTEX_H
 -#define _ASM_X86_FUTEX_H
 +#ifndef ASM_X86__FUTEX_H
 +#define ASM_X86__FUTEX_H
  
  #ifdef __KERNEL__
  
@@@ -25,7 -25,7 +25,7 @@@
        asm volatile("1:\tmovl  %2, %0\n"                       \
                     "\tmovl\t%0, %3\n"                         \
                     "\t" insn "\n"                             \
-                    "2:\tlock; cmpxchgl %3, %2\n"              \
+                    "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"     \
                     "\tjnz\t1b\n"                              \
                     "3:\t.section .fixup,\"ax\"\n"             \
                     "4:\tmov\t%5, %1\n"                        \
@@@ -64,7 -64,7 +64,7 @@@ static inline int futex_atomic_op_inuse
                __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval,
+               __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
                                   uaddr, oparg);
                break;
        case FUTEX_OP_OR:
@@@ -122,7 -122,7 +122,7 @@@ static inline int futex_atomic_cmpxchg_
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
  
-       asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+       asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
                     "2:\t.section .fixup, \"ax\"\n"
                     "3:\tmov     %2, %0\n"
                     "\tjmp     2b\n"
  }
  
  #endif
 -#endif
 +#endif /* ASM_X86__FUTEX_H */
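Replacing the hard-coded "lock;" with LOCK_PREFIX throughout this header is what ties the futex ops into alternatives_smp_lock()/_unlock() above: besides emitting the prefix, LOCK_PREFIX records the prefix's address in the .smp_locks section, which is exactly the start/end list those functions walk. A sketch of the macro as it looked in this era, reconstructed from memory of include/asm-x86/alternative.h (treat the details as approximate):

#ifdef CONFIG_SMP
#define LOCK_PREFIX \
		".section .smp_locks,\"a\"\n"	\
		_ASM_ALIGN "\n"			\
		_ASM_PTR "661f\n" /* address of the lock prefix */ \
		".previous\n"			\
		"661:\n\tlock; "
#else
#define LOCK_PREFIX ""
#endif

On UP kernels the prefix disappears entirely at compile time; on SMP kernels it is emitted but can still be patched out at boot by the code in alternative.c.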
diff --combined include/asm-x86/spinlock.h
index 5d08fa280fdf3f4f27594b6a2d78ebe702d31731,0b4d59a93d2328500d91e26268298e1468456c25..93adae338ac64625ab1c1ec63634edd8c3d8dcd3
@@@ -1,5 -1,5 +1,5 @@@
 -#ifndef _X86_SPINLOCK_H_
 -#define _X86_SPINLOCK_H_
 +#ifndef ASM_X86__SPINLOCK_H
 +#define ASM_X86__SPINLOCK_H
  
  #include <asm/atomic.h>
  #include <asm/rwlock.h>
@@@ -65,7 -65,7 +65,7 @@@ static inline int __ticket_spin_is_cont
  {
        int tmp = ACCESS_ONCE(lock->slock);
  
 -      return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 +      return (((tmp >> 8) - tmp) & 0xff) > 1;
  }
  
  static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
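The new contended test fixes a wraparound bug (the NR_CPUS > 256 variant below gets the identical fix with 16-bit fields): head and tail are free-running counters, so their difference must be reduced modulo the field width before the comparison, not computed at full int width from individually masked values. A worked example where the counters have wrapped:

#include <assert.h>

/* low byte = head (ticket now being served), high byte = tail (next ticket) */
static int contended_old(int tmp)
{
	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static int contended_new(int tmp)
{
	return (((tmp >> 8) - tmp) & 0xff) > 1;
}

int main(void)
{
	int tmp = 0x01ff;  /* head=0xff, tail=0x01: a holder plus one waiter */

	assert(contended_old(tmp) == 0);  /* 1 - 255 = -254: wrongly "not contended" */
	assert(contended_new(tmp) == 1);  /* (1 - 0x1ff) & 0xff = 2: contended */
	return 0;
}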
@@@ -97,7 -97,7 +97,7 @@@ static __always_inline int __ticket_spi
                     "jne 1f\n\t"
                     "movw %w0,%w1\n\t"
                     "incb %h1\n\t"
-                    "lock ; cmpxchgw %w1,%2\n\t"
+                    LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
@@@ -127,7 -127,7 +127,7 @@@ static inline int __ticket_spin_is_cont
  {
        int tmp = ACCESS_ONCE(lock->slock);
  
 -      return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 +      return (((tmp >> 16) - tmp) & 0xffff) > 1;
  }
  
  static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
  {
        int inc = 0x00010000;
        int tmp;
  
-       asm volatile("lock ; xaddl %0, %1\n"
+       asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
                     "movzwl %w0, %2\n\t"
                     "shrl $16, %0\n\t"
                     "1:\t"
@@@ -162,7 -162,7 +162,7 @@@ static __always_inline int __ticket_spi
                     "cmpl %0,%1\n\t"
                     "jne 1f\n\t"
                     "addl $0x00010000, %1\n\t"
-                    "lock ; cmpxchgl %1,%2\n\t"
+                    LOCK_PREFIX "cmpxchgl %1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
@@@ -366,4 -366,4 +366,4 @@@ static inline void __raw_write_unlock(r
  #define _raw_read_relax(lock) cpu_relax()
  #define _raw_write_relax(lock)        cpu_relax()
  
 -#endif
 +#endif /* ASM_X86__SPINLOCK_H */
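For reference, the trylock asm patched above (both the 8-bit and the NR_CPUS > 256 variants) implements the same algorithm: load the lock word, and only if head == tail, i.e. no ticket is outstanding, try to claim the next ticket with a locked cmpxchg. A portable C11 model of the 8-bit variant, using <stdatomic.h> in place of the inline asm (the names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* low byte = head (now serving), high byte = tail (next free ticket) */
struct ticket_lock {
	_Atomic uint16_t slock;
};

static bool ticket_trylock(struct ticket_lock *lock)
{
	uint16_t old = atomic_load(&lock->slock);
	uint8_t head = old & 0xff;
	uint8_t tail = old >> 8;

	if (head != tail)	/* a ticket is already out: lock is held */
		return false;

	/* mirrors "incb %h1" + "cmpxchgw": take the next ticket only if
	 * the lock word is still exactly what we loaded */
	uint16_t next = (uint16_t)((((tail + 1) & 0xff) << 8) | head);
	return atomic_compare_exchange_strong(&lock->slock, &old, next);
}

The xadd-based __ticket_spin_lock is the unconditional counterpart: it always takes a ticket and then spins until head catches up with it.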