Git Repo - linux.git/commitdiff
Merge remote branch 'tip/x86/entry' into kvm-updates/2.6.33
author Avi Kivity <[email protected]>
Thu, 3 Dec 2009 07:30:06 +0000 (09:30 +0200)
committer Avi Kivity <[email protected]>
Thu, 3 Dec 2009 07:30:06 +0000 (09:30 +0200)
Signed-off-by: Avi Kivity <[email protected]>
arch/x86/Kconfig
kernel/Makefile
kernel/fork.c

diff --combined arch/x86/Kconfig
index 72ace9515a07a44525778899e1ea04b32b3accbc,1df175d15aa8c56c2709425c891f29642276bac0..8b54096e6d7347c48dfd8a2cf56babcccc0fb11c
@@@ -50,6 -50,7 +50,7 @@@ config X86
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_LZMA
        select HAVE_ARCH_KMEMCHECK
+       select HAVE_USER_RETURN_NOTIFIER
  
  config OUTPUT_FORMAT
        string
@@@ -86,6 -87,10 +87,6 @@@ config STACKTRACE_SUPPORT
  config HAVE_LATENCYTOP_SUPPORT
        def_bool y
  
 -config FAST_CMPXCHG_LOCAL
 -      bool
 -      default y
 -
  config MMU
        def_bool y
  
@@@ -491,7 -496,7 +492,7 @@@ if PARAVIRT_GUEST
  source "arch/x86/xen/Kconfig"
  
  config VMI
 -      bool "VMI Guest support"
 +      bool "VMI Guest support (DEPRECATED)"
        select PARAVIRT
        depends on X86_32
        ---help---
          at the moment), by linking the kernel to a GPL-ed ROM module
          provided by the hypervisor.
  
 +        As of September 2009, VMware has started a phased retirement
 +        of this feature from VMware's products. Please see
 +        feature-removal-schedule.txt for details.  If you are
 +        planning to enable this option, please note that you cannot
 +        live migrate a VMI enabled VM to a future VMware product,
 +        which doesn't support VMI. So if you expect your kernel to
 +        seamlessly migrate to newer VMware products, keep this
 +        disabled.
 +
  config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
@@@ -1443,8 -1439,12 +1444,8 @@@ config SECCOMP
  
          If unsure, say Y. Only embedded should say N here.
  
 -config CC_STACKPROTECTOR_ALL
 -      bool
 -
  config CC_STACKPROTECTOR
        bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
 -      select CC_STACKPROTECTOR_ALL
        ---help---
          This option turns on the -fstack-protector GCC feature. This
          feature puts, at the beginning of functions, a canary value on
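
The CC_STACKPROTECTOR help text above describes GCC's stack-canary scheme: a guard value is placed between a function's on-stack buffers and its saved return address and is verified before the function returns. As a rough userspace illustration only (not kernel code; the file, function and buffer names are made up), a build with -fstack-protector aborts the following program instead of returning through a clobbered address:

/* canary_demo.c - compile with:  gcc -fstack-protector canary_demo.c
 * The epilogue check added by the compiler notices that the canary next to
 * buf has been overwritten and calls __stack_chk_fail(), which terminates
 * the process.
 */
#include <string.h>

static void copy_name(const char *src)          /* hypothetical helper */
{
        char buf[16];

        strcpy(buf, src);    /* an over-long src overruns buf and the canary */
}

int main(void)
{
        copy_name("this string is comfortably longer than sixteen bytes");
        return 0;
}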
diff --combined kernel/Makefile
index d7c13d249b2d7176e4f534cf5835ba7b65749cbb,0ae57a83d481feab2630d1f5856807b298743cce..6c5112844980152377428edd6ececa111b7d2ec4
@@@ -94,8 -94,8 +94,9 @@@ obj-$(CONFIG_X86_DS) += trace/
  obj-$(CONFIG_RING_BUFFER) += trace/
  obj-$(CONFIG_SMP) += sched_cpupri.o
  obj-$(CONFIG_SLOW_WORK) += slow-work.o
 +obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
  obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+ obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
  
  ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
  # According to Alan Modra <[email protected]>, the -fno-omit-frame-pointer is
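
The new kernel/user-return-notifier.o object pairs with the HAVE_USER_RETURN_NOTIFIER select in the arch/x86/Kconfig hunk above and with the <linux/user-return-notifier.h> include and clear_user_return_notifier() call in the kernel/fork.c hunks below. The following is only a sketch of the interface such an object could export: apart from clear_user_return_notifier(), whose call site is visible in the fork.c diff, the structure layout, function names and TIF flag are illustrative assumptions, not a copy of the in-tree header.

/* Sketch of a user-return-notifier interface (assumed, not verbatim).
 * A subsystem such as KVM arms a callback on the current CPU and the
 * architecture fires it on the next return to userspace, for example to
 * restore user MSR values lazily instead of on every context switch.
 */
#include <linux/list.h>
#include <linux/sched.h>

struct user_return_notifier {
        void (*on_user_return)(struct user_return_notifier *urn);
        struct hlist_node link;
};

/* Arm the callback on the current CPU (assumed signature). */
void user_return_notifier_register(struct user_return_notifier *urn);

/* Remove a previously armed callback (assumed signature). */
void user_return_notifier_unregister(struct user_return_notifier *urn);

/* Called from dup_task_struct() (kernel/fork.c hunk below) so that a freshly
 * copied child does not inherit the parent's pending notification; one
 * plausible implementation simply clears a per-task TIF flag.
 */
static inline void clear_user_return_notifier(struct task_struct *p)
{
        clear_tsk_thread_flag(p, TIF_USER_RETURN_NOTIFY);
}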
diff --combined kernel/fork.c
index 166b8c49257c589750f0b67479011ba7bac97038,1b7512d5a64a88fb76908251e38104f13008f72c..7d4a348ea4f4ef3c6e6b840c506f00c25d1e6b5e
@@@ -64,6 -64,7 +64,7 @@@
  #include <linux/magic.h>
  #include <linux/perf_event.h>
  #include <linux/posix-timers.h>
+ #include <linux/user-return-notifier.h>
  
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
@@@ -91,7 -92,7 +92,7 @@@ int nr_processes(void)
        int cpu;
        int total = 0;
  
 -      for_each_online_cpu(cpu)
 +      for_each_possible_cpu(cpu)
                total += per_cpu(process_counts, cpu);
  
        return total;
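
The nr_processes() hunk above switches the sum from for_each_online_cpu() to for_each_possible_cpu(), presumably because per-cpu process_counts accumulated on a CPU are not moved anywhere when that CPU goes offline, so an online-only sum can under-count. The same summing pattern for a per-cpu counter, with hypothetical names (my_counter, sum_my_counter), looks like this:

/* Hypothetical per-cpu counter summed the same way as process_counts.
 * Iterating over possible rather than online CPUs also picks up
 * contributions left behind on CPUs that have since been hot-unplugged.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_counter);

static int sum_my_counter(void)
{
        int cpu;
        int total = 0;

        for_each_possible_cpu(cpu)
                total += per_cpu(my_counter, cpu);

        return total;
}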
@@@ -249,6 -250,7 +250,7 @@@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto out;
  
        setup_thread_stack(tsk, orig);
+       clear_user_return_notifier(tsk);
        stackend = end_of_stack(tsk);
        *stackend = STACK_END_MAGIC;    /* for overflow detection */
  
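
Two details in the dup_task_struct() hunk above: the new clear_user_return_notifier() call (see the sketch after the kernel/Makefile diff) and the pre-existing STACK_END_MAGIC sentinel written at end_of_stack() of the copied task. For context, here is a hedged sketch of how such a sentinel can be consumed later to detect kernel-stack overflow; the helper name and the exact check site are assumptions, only the sentinel itself comes from the hunk.

/* check_stack_sentinel() is a made-up helper: if the magic value written at
 * end_of_stack() in dup_task_struct() has been overwritten, the task's
 * kernel stack has grown down into the thread_info area and continuing is
 * unsafe.
 */
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/sched.h>

static inline void check_stack_sentinel(struct task_struct *tsk)
{
        unsigned long *stackend = end_of_stack(tsk);

        if (unlikely(*stackend != STACK_END_MAGIC))
                panic("kernel stack overflow: task %s\n", tsk->comm);
}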
@@@ -570,18 -572,12 +572,18 @@@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
  
        /* Get rid of any futexes when releasing the mm */
  #ifdef CONFIG_FUTEX
 -      if (unlikely(tsk->robust_list))
 +      if (unlikely(tsk->robust_list)) {
                exit_robust_list(tsk);
 +              tsk->robust_list = NULL;
 +      }
  #ifdef CONFIG_COMPAT
 -      if (unlikely(tsk->compat_robust_list))
 +      if (unlikely(tsk->compat_robust_list)) {
                compat_exit_robust_list(tsk);
 +              tsk->compat_robust_list = NULL;
 +      }
  #endif
 +      if (unlikely(!list_empty(&tsk->pi_state_list)))
 +              exit_pi_state_list(tsk);
  #endif
  
        /* Get rid of any cached register state */