Git Repo - linux.git/commitdiff
Merge branch 'sched/urgent' into sched/core, to avoid conflicts
author     Ingo Molnar <[email protected]>
           Wed, 7 May 2014 11:15:46 +0000 (13:15 +0200)
committer  Ingo Molnar <[email protected]>
           Wed, 7 May 2014 11:15:46 +0000 (13:15 +0200)
Signed-off-by: Ingo Molnar <[email protected]>
drivers/block/loop.c
drivers/block/nbd.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/fcoe/fcoe.c
kernel/sched/fair.c
mm/huge_memory.c

diff --combined drivers/block/loop.c
index c8bf270b7890883dc7b1c3a929ed6ae2c13e61f1,f70a230a2945225f89ae188909c7bc9db90bc32f..6cb1beb47c25d1d2a7db113ca9f173a9ef8b68d3
@@@ -237,7 -237,7 +237,7 @@@ static int __do_lo_send_write(struct fi
        file_end_write(file);
        if (likely(bw == len))
                return 0;
-       printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+       printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
                        (unsigned long long)pos, len);
        if (bw >= 0)
                bw = -EIO;
@@@ -277,7 -277,7 +277,7 @@@ static int do_lo_send_write(struct loop
                return __do_lo_send_write(lo->lo_backing_file,
                                page_address(page), bvec->bv_len,
                                pos);
-       printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+       printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
                        "length %i.\n", (unsigned long long)pos, bvec->bv_len);
        if (ret > 0)
                ret = -EIO;
@@@ -316,7 -316,7 +316,7 @@@ static int lo_send(struct loop_device *
  out:
        return ret;
  fail:
-       printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+       printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
        ret = -ENOMEM;
        goto out;
  }
@@@ -345,7 -345,7 +345,7 @@@ lo_splice_actor(struct pipe_inode_info 
                size = p->bsize;
  
        if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-               printk(KERN_ERR "loop: transfer error block %ld\n",
+               printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
                       page->index);
                size = -EINVAL;
        }
@@@ -548,7 -548,7 +548,7 @@@ static int loop_thread(void *data
        struct loop_device *lo = data;
        struct bio *bio;
  
 -      set_user_nice(current, -20);
 +      set_user_nice(current, MIN_NICE);
  
        while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
  
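For reference, a minimal sketch (not part of the commit) of the two patterns the loop.c hunks adopt: the worker thread's priority expressed through the MIN_NICE constant instead of the magic number -20, and error reporting through printk_ratelimited() so a misbehaving backing file cannot flood the kernel log. The example_* names and the -EIO stand-in are illustrative, not taken from the driver.

/* Illustrative sketch only; not part of drivers/block/loop.c. */
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int example_io_thread(void *data)
{
	set_user_nice(current, MIN_NICE);	/* replaces the magic number -20 */

	while (!kthread_should_stop()) {
		int ret = -EIO;			/* stand-in for a real transfer result */

		if (ret < 0)
			printk_ratelimited(KERN_ERR
				"example: transfer error %d\n", ret);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}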
diff --combined drivers/block/nbd.c
index 2a1f26bd6640d9a2c14728644ea1be65f05a4f38,3a70ea2f7cd69b2641302e6c44560f32245a078c..56a027d6115e0f5fa83c48070ff26ca6d0ba061e
@@@ -533,7 -533,7 +533,7 @@@ static int nbd_thread(void *data
        struct nbd_device *nbd = data;
        struct request *req;
  
 -      set_user_nice(current, -20);
 +      set_user_nice(current, MIN_NICE);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
@@@ -630,37 -630,29 +630,29 @@@ static int __nbd_ioctl(struct block_dev
        }
   
        case NBD_CLEAR_SOCK: {
-               struct file *file;
+               struct socket *sock = nbd->sock;
                nbd->sock = NULL;
-               file = nbd->file;
-               nbd->file = NULL;
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
                kill_bdev(bdev);
-               if (file)
-                       fput(file);
+               if (sock)
+                       sockfd_put(sock);
                return 0;
        }
  
        case NBD_SET_SOCK: {
-               struct file *file;
-               if (nbd->file)
+               struct socket *sock;
+               int err;
+               if (nbd->sock)
                        return -EBUSY;
-               file = fget(arg);
-               if (file) {
-                       struct inode *inode = file_inode(file);
-                       if (S_ISSOCK(inode->i_mode)) {
-                               nbd->file = file;
-                               nbd->sock = SOCKET_I(inode);
-                               if (max_part > 0)
-                                       bdev->bd_invalidated = 1;
-                               nbd->disconnect = 0; /* we're connected now */
-                               return 0;
-                       } else {
-                               fput(file);
-                       }
+               sock = sockfd_lookup(arg, &err);
+               if (sock) {
+                       nbd->sock = sock;
+                       if (max_part > 0)
+                               bdev->bd_invalidated = 1;
+                       nbd->disconnect = 0; /* we're connected now */
+                       return 0;
                }
                return -EINVAL;
        }
  
        case NBD_DO_IT: {
                struct task_struct *thread;
-               struct file *file;
+               struct socket *sock;
                int error;
  
                if (nbd->pid)
                        return -EBUSY;
-               if (!nbd->file)
+               if (!nbd->sock)
                        return -EINVAL;
  
                mutex_unlock(&nbd->tx_lock);
                if (error)
                        return error;
                sock_shutdown(nbd, 0);
-               file = nbd->file;
-               nbd->file = NULL;
+               sock = nbd->sock;
+               nbd->sock = NULL;
                nbd_clear_que(nbd);
                dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
                kill_bdev(bdev);
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
                set_device_ro(bdev, false);
-               if (file)
-                       fput(file);
+               if (sock)
+                       sockfd_put(sock);
                nbd->flags = 0;
                nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
@@@ -875,9 -867,7 +867,7 @@@ static int __init nbd_init(void
  
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
-               nbd_dev[i].file = NULL;
                nbd_dev[i].magic = NBD_MAGIC;
-               nbd_dev[i].flags = 0;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
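For reference, a minimal sketch (not part of the commit) of the fd-to-socket handling the nbd.c hunks switch to: sockfd_lookup() resolves a user-supplied file descriptor straight to a struct socket and takes a reference on the underlying file, and sockfd_put() releases it, which is what lets the driver drop its separate struct file pointer. The example_dev structure and function names are illustrative.

/* Illustrative sketch only; not part of drivers/block/nbd.c. */
#include <linux/net.h>

struct example_dev {
	struct socket *sock;
};

static int example_set_sock(struct example_dev *dev, int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);

	if (!sock)
		return err;		/* -EBADF, -ENOTSOCK, ... */
	dev->sock = sock;
	return 0;
}

static void example_clear_sock(struct example_dev *dev)
{
	struct socket *sock = dev->sock;

	dev->sock = NULL;
	if (sock)
		sockfd_put(sock);	/* pairs with sockfd_lookup() */
}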
diff --combined drivers/char/ipmi/ipmi_si_intf.c
index 0f20d3646a465aefc16537d8037811a9c82bed56,1c4bb4f6ce932f95043385d1387ed14b789a9996..5d665680ae33fea5bbc74084d1d79d37f62c4f15
@@@ -217,7 -217,7 +217,7 @@@ struct smi_info 
        unsigned char       msg_flags;
  
        /* Does the BMC have an event buffer? */
-       char                has_event_buffer;
+       bool                has_event_buffer;
  
        /*
         * If set to true, this will request events the next time the
         * call.  Generally used after a panic to make sure stuff goes
         * out.
         */
-       int                 run_to_completion;
+       bool                run_to_completion;
  
        /* The I/O port of an SI interface. */
        int                 port;
        /* The timer for this si. */
        struct timer_list   si_timer;
  
+       /* This flag is set, if the timer is running (timer_pending() isn't enough) */
+       bool                timer_running;
        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long       last_timeout_jiffies;
  
        /* Used to gracefully stop the timer without race conditions. */
        atomic_t            stop_operation;
  
+       /* Are we waiting for the events, pretimeouts, received msgs? */
+       atomic_t            need_watch;
        /*
         * The driver will disable interrupts when it gets into a
         * situation where it cannot handle messages due to lack of
         * memory.  Once that situation clears up, it will re-enable
         * interrupts.
         */
-       int interrupt_disabled;
+       bool interrupt_disabled;
  
        /* From the get device id response... */
        struct ipmi_device_id device_id;
         * True if we allocated the device, false if it came from
         * someplace else (like PCI).
         */
-       int dev_registered;
+       bool dev_registered;
  
        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;
  static int force_kipmid[SI_MAX_PARMS];
  static int num_force_kipmid;
  #ifdef CONFIG_PCI
- static int pci_registered;
+ static bool pci_registered;
  #endif
  #ifdef CONFIG_ACPI
- static int pnp_registered;
+ static bool pnp_registered;
  #endif
  #ifdef CONFIG_PARISC
- static int parisc_registered;
+ static bool parisc_registered;
  #endif
  
  static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
  static int num_max_busy_us;
  
- static int unload_when_empty = 1;
+ static bool unload_when_empty = true;
  
  static int add_smi(struct smi_info *smi);
  static int try_smi_init(struct smi_info *smi);
@@@ -434,6 -440,13 +440,13 @@@ static void start_clear_flags(struct sm
        smi_info->si_state = SI_CLEARING_FLAGS;
  }
  
+ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+ {
+       smi_info->last_timeout_jiffies = jiffies;
+       mod_timer(&smi_info->si_timer, new_val);
+       smi_info->timer_running = true;
+ }
  /*
   * When we have a situtaion where we run out of memory and cannot
   * allocate messages, we just leave them in the BMC and run the system
@@@ -444,10 -457,9 +457,9 @@@ static inline void disable_si_irq(struc
  {
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                start_disable_irq(smi_info);
-               smi_info->interrupt_disabled = 1;
+               smi_info->interrupt_disabled = true;
                if (!atomic_read(&smi_info->stop_operation))
-                       mod_timer(&smi_info->si_timer,
-                                 jiffies + SI_TIMEOUT_JIFFIES);
+                       smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
        }
  }
  
@@@ -455,7 -467,7 +467,7 @@@ static inline void enable_si_irq(struc
  {
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                start_enable_irq(smi_info);
-               smi_info->interrupt_disabled = 0;
+               smi_info->interrupt_disabled = false;
        }
  }
  
@@@ -700,7 -712,7 +712,7 @@@ static void handle_transaction_done(str
                        dev_warn(smi_info->dev,
                                 "Maybe ok, but ipmi might run very slowly.\n");
                } else
-                       smi_info->interrupt_disabled = 0;
+                       smi_info->interrupt_disabled = false;
                smi_info->si_state = SI_NORMAL;
                break;
        }
@@@ -853,6 -865,19 +865,19 @@@ static enum si_sm_result smi_event_hand
        return si_sm_result;
  }
  
+ static void check_start_timer_thread(struct smi_info *smi_info)
+ {
+       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+               smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+               if (smi_info->thread)
+                       wake_up_process(smi_info->thread);
+               start_next_msg(smi_info);
+               smi_event_handler(smi_info, 0);
+       }
+ }
  static void sender(void                *send_info,
                   struct ipmi_smi_msg *msg,
                   int                 priority)
        else
                list_add_tail(&msg->link, &smi_info->xmit_msgs);
  
-       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
-               /*
-                * last_timeout_jiffies is updated here to avoid
-                * smi_timeout() handler passing very large time_diff
-                * value to smi_event_handler() that causes
-                * the send command to abort.
-                */
-               smi_info->last_timeout_jiffies = jiffies;
-               mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-               if (smi_info->thread)
-                       wake_up_process(smi_info->thread);
-               start_next_msg(smi_info);
-               smi_event_handler(smi_info, 0);
-       }
+       check_start_timer_thread(smi_info);
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
  }
  
- static void set_run_to_completion(void *send_info, int i_run_to_completion)
+ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
  {
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
@@@ -998,12 -1007,23 +1007,23 @@@ static int ipmi_thread(void *data
        struct timespec busy_until;
  
        ipmi_si_set_not_busy(&busy_until);
 -      set_user_nice(current, 19);
 +      set_user_nice(current, MAX_NICE);
        while (!kthread_should_stop()) {
                int busy_wait;
  
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
+               /*
+                * If the driver is doing something, there is a possible
+                * race with the timer.  If the timer handler see idle,
+                * and the thread here sees something else, the timer
+                * handler won't restart the timer even though it is
+                * required.  So start it here if necessary.
+                */
+               if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+                       smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
                                                  &busy_until);
                        ; /* do nothing */
                else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
                        schedule();
-               else if (smi_result == SI_SM_IDLE)
-                       schedule_timeout_interruptible(100);
-               else
+               else if (smi_result == SI_SM_IDLE) {
+                       if (atomic_read(&smi_info->need_watch)) {
+                               schedule_timeout_interruptible(100);
+                       } else {
+                               /* Wait to be woken up when we are needed. */
+                               __set_current_state(TASK_INTERRUPTIBLE);
+                               schedule();
+                       }
+               } else
                        schedule_timeout_interruptible(1);
        }
        return 0;
@@@ -1024,7 -1050,7 +1050,7 @@@ static void poll(void *send_info
  {
        struct smi_info *smi_info = send_info;
        unsigned long flags = 0;
-       int run_to_completion = smi_info->run_to_completion;
+       bool run_to_completion = smi_info->run_to_completion;
  
        /*
         * Make sure there is some delay in the poll loop so we can
@@@ -1049,6 -1075,17 +1075,17 @@@ static void request_events(void *send_i
        atomic_set(&smi_info->req_events, 1);
  }
  
+ static void set_need_watch(void *send_info, bool enable)
+ {
+       struct smi_info *smi_info = send_info;
+       unsigned long flags;
+       atomic_set(&smi_info->need_watch, enable);
+       spin_lock_irqsave(&smi_info->si_lock, flags);
+       check_start_timer_thread(smi_info);
+       spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ }
  static int initialized;
  
  static void smi_timeout(unsigned long data)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);
  
-       spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-       smi_info->last_timeout_jiffies = jiffies_now;
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                timeout = jiffies + SI_TIMEOUT_JIFFIES;
  
   do_mod_timer:
        if (smi_result != SI_SM_IDLE)
-               mod_timer(&(smi_info->si_timer), timeout);
+               smi_mod_timer(smi_info, timeout);
+       else
+               smi_info->timer_running = false;
+       spin_unlock_irqrestore(&(smi_info->si_lock), flags);
  }
  
  static irqreturn_t si_irq_handler(int irq, void *data)
@@@ -1146,8 -1182,7 +1182,7 @@@ static int smi_start_processing(voi
  
        /* Set up the timer that drives the interface. */
        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
-       new_smi->last_timeout_jiffies = jiffies;
-       mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+       smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
  
        /*
         * Check if the user forcefully enabled the daemon.
@@@ -1188,7 -1223,7 +1223,7 @@@ static int get_smi_info(void *send_info
        return 0;
  }
  
- static void set_maintenance_mode(void *send_info, int enable)
+ static void set_maintenance_mode(void *send_info, bool enable)
  {
        struct smi_info   *smi_info = send_info;
  
@@@ -1202,6 -1237,7 +1237,7 @@@ static struct ipmi_smi_handlers handler
        .get_smi_info           = get_smi_info,
        .sender                 = sender,
        .request_events         = request_events,
+       .set_need_watch         = set_need_watch,
        .set_maintenance_mode   = set_maintenance_mode,
        .set_run_to_completion  = set_run_to_completion,
        .poll                   = poll,
@@@ -1229,7 -1265,7 +1265,7 @@@ static bool          si_tryplatform = 1
  #ifdef CONFIG_PCI
  static bool          si_trypci = 1;
  #endif
- static bool          si_trydefaults = 1;
+ static bool          si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
  static char          *si_type[SI_MAX_PARMS];
  #define MAX_SI_TYPE_STR 30
  static char          si_type_str[MAX_SI_TYPE_STR];
@@@ -1328,7 -1364,7 +1364,7 @@@ module_param_array(force_kipmid, int, &
  MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
                 " disabled(0).  Normally the IPMI driver auto-detects"
                 " this, but the value may be overridden by this parm.");
- module_param(unload_when_empty, int, 0);
+ module_param(unload_when_empty, bool, 0);
  MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
                 " specified or found, default is 1.  Setting to 0"
                 " is useful for hot add of devices using hotmod.");
@@@ -3336,18 -3372,19 +3372,19 @@@ static int try_smi_init(struct smi_inf
        INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
        new_smi->curr_msg = NULL;
        atomic_set(&new_smi->req_events, 0);
-       new_smi->run_to_completion = 0;
+       new_smi->run_to_completion = false;
        for (i = 0; i < SI_NUM_STATS; i++)
                atomic_set(&new_smi->stats[i], 0);
  
-       new_smi->interrupt_disabled = 1;
+       new_smi->interrupt_disabled = true;
        atomic_set(&new_smi->stop_operation, 0);
+       atomic_set(&new_smi->need_watch, 0);
        new_smi->intf_num = smi_num;
        smi_num++;
  
        rv = try_enable_event_buffer(new_smi);
        if (rv == 0)
-               new_smi->has_event_buffer = 1;
+               new_smi->has_event_buffer = true;
  
        /*
         * Start clearing the flags before we enable interrupts or the
                               rv);
                        goto out_err;
                }
-               new_smi->dev_registered = 1;
+               new_smi->dev_registered = true;
        }
  
        rv = ipmi_register_smi(&handlers,
        wait_for_timer_and_thread(new_smi);
  
   out_err:
-       new_smi->interrupt_disabled = 1;
+       new_smi->interrupt_disabled = true;
  
        if (new_smi->intf) {
                ipmi_unregister_smi(new_smi->intf);
  
        if (new_smi->dev_registered) {
                platform_device_unregister(new_smi->pdev);
-               new_smi->dev_registered = 0;
+               new_smi->dev_registered = false;
        }
  
        return rv;
@@@ -3521,14 -3558,14 +3558,14 @@@ static int init_ipmi_si(void
                        printk(KERN_ERR PFX "Unable to register "
                               "PCI driver: %d\n", rv);
                else
-                       pci_registered = 1;
+                       pci_registered = true;
        }
  #endif
  
  #ifdef CONFIG_ACPI
        if (si_tryacpi) {
                pnp_register_driver(&ipmi_pnp_driver);
-               pnp_registered = 1;
+               pnp_registered = true;
        }
  #endif
  
  
  #ifdef CONFIG_PARISC
        register_parisc_driver(&ipmi_parisc_driver);
-       parisc_registered = 1;
+       parisc_registered = true;
        /* poking PC IO addresses will crash machine, don't do it */
        si_trydefaults = 0;
  #endif
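For reference, a minimal sketch (not part of the commit) of the bookkeeping idea behind the new smi_mod_timer() helper in the ipmi_si_intf.c hunks: every re-arm records the arming time and marks the timer as running, so smi_timeout() and the kthread can distinguish "deliberately idle, timer stopped" from "timer merely not pending right now". The example_smi structure and function name are illustrative; mod_timer() and jiffies are the real kernel interfaces.

/* Illustrative sketch only; not part of drivers/char/ipmi/ipmi_si_intf.c. */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_smi {
	struct timer_list timer;
	unsigned long	  last_timeout_jiffies;
	bool		  timer_running;
};

static void example_mod_timer(struct example_smi *smi, unsigned long expires)
{
	smi->last_timeout_jiffies = jiffies;	/* base for the next time_diff */
	mod_timer(&smi->timer, expires);
	smi->timer_running = true;		/* cleared only when we go idle */
}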
diff --combined drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 3455cc5e4bfd245a21f9e1a5c866faa347b43d86,1d41f4b9114f8253e780d279799dad0ac0d27e04..f548430234663691b80f3e50ab94e2088bdf25ad
@@@ -464,7 -464,7 +464,7 @@@ static int bnx2fc_l2_rcv_thread(void *a
        struct fcoe_percpu_s *bg = arg;
        struct sk_buff *skb;
  
 -      set_user_nice(current, -20);
 +      set_user_nice(current, MIN_NICE);
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
@@@ -602,7 -602,7 +602,7 @@@ int bnx2fc_percpu_io_thread(void *arg
        struct bnx2fc_work *work, *tmp;
        LIST_HEAD(work_list);
  
 -      set_user_nice(current, -20);
 +      set_user_nice(current, MIN_NICE);
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
@@@ -2592,12 -2592,16 +2592,16 @@@ static int __init bnx2fc_mod_init(void
                spin_lock_init(&p->fp_work_lock);
        }
  
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu) {
                bnx2fc_percpu_thread_create(cpu);
        }
  
        /* Initialize per CPU interrupt thread */
-       register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+       __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+       cpu_notifier_register_done();
  
        cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
  
@@@ -2662,13 -2666,17 +2666,17 @@@ static void __exit bnx2fc_mod_exit(void
        if (l2_thread)
                kthread_stop(l2_thread);
  
-       unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+       cpu_notifier_register_begin();
  
        /* Destroy per cpu threads */
        for_each_online_cpu(cpu) {
                bnx2fc_percpu_thread_destroy(cpu);
        }
  
+       __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+       cpu_notifier_register_done();
        destroy_workqueue(bnx2fc_wq);
        /*
         * detach from scsi transport
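For reference, a minimal sketch (not part of the commit) of the hotplug-safe registration pattern that both the bnx2fc hunks above and the fcoe.c hunks below adopt: the cpu_notifier_register_begin()/cpu_notifier_register_done() section makes "create a per-CPU thread for every online CPU" and "register the hotplug notifier" atomic with respect to CPU hotplug, using the __register_hotcpu_notifier() variant meant for callers already inside that section. The example_* names are illustrative.

/* Illustrative sketch only; not part of either driver. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

static struct notifier_block example_cpu_notifier;

static void example_percpu_thread_create(int cpu)
{
	/* per-CPU setup would go here */
}

static void example_init_percpu(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		example_percpu_thread_create(cpu);

	/* lock already held, so use the double-underscore variant */
	__register_hotcpu_notifier(&example_cpu_notifier);

	cpu_notifier_register_done();
}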
diff --combined drivers/scsi/fcoe/fcoe.c
index 843a679d2a5e5733355e5aa3819651886d1d6071,d5e105b173f0cf121894fcb5105a5afedadc16d5..00ee0ed642aac717fd8c0b1e2976c860ccb664ff
@@@ -1872,7 -1872,7 +1872,7 @@@ static int fcoe_percpu_receive_thread(v
  
        skb_queue_head_init(&tmp);
  
 -      set_user_nice(current, -20);
 +      set_user_nice(current, MIN_NICE);
  
  retry:
        while (!kthread_should_stop()) {
@@@ -2633,14 -2633,18 +2633,18 @@@ static int __init fcoe_init(void
                skb_queue_head_init(&p->fcoe_rx_list);
        }
  
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                fcoe_percpu_thread_create(cpu);
  
        /* Initialize per CPU interrupt thread */
-       rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
+       rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
        if (rc)
                goto out_free;
  
+       cpu_notifier_register_done();
        /* Setup link change notification */
        fcoe_dev_setup();
  
@@@ -2655,6 -2659,9 +2659,9 @@@ out_free
        for_each_online_cpu(cpu) {
                fcoe_percpu_thread_destroy(cpu);
        }
+       cpu_notifier_register_done();
        mutex_unlock(&fcoe_config_mutex);
        destroy_workqueue(fcoe_wq);
        return rc;
@@@ -2687,11 -2694,15 +2694,15 @@@ static void __exit fcoe_exit(void
        }
        rtnl_unlock();
  
-       unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+       cpu_notifier_register_begin();
  
        for_each_online_cpu(cpu)
                fcoe_percpu_thread_destroy(cpu);
  
+       __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+       cpu_notifier_register_done();
        mutex_unlock(&fcoe_config_mutex);
  
        /*
diff --combined kernel/sched/fair.c
index 43232b8bacde740904d939123325e5be9e886264,0fdb96de81a5b8a92c302961769cdefdb5cad915..5d859ec975c2caef20530e0cea4410d37a186f57
@@@ -5564,7 -5564,6 +5564,7 @@@ static unsigned long scale_rt_power(in
  {
        struct rq *rq = cpu_rq(cpu);
        u64 total, available, age_stamp, avg;
 +      s64 delta;
  
        /*
         * Since we're reading these variables without serialization make sure
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
  
 -      total = sched_avg_period() + (rq_clock(rq) - age_stamp);
 +      delta = rq_clock(rq) - age_stamp;
 +      if (unlikely(delta < 0))
 +              delta = 0;
 +
 +      total = sched_avg_period() + delta;
  
        if (unlikely(total < avg)) {
                /* Ensures that power won't end up being negative */
@@@ -6658,6 -6653,7 +6658,7 @@@ static int idle_balance(struct rq *this
        int this_cpu = this_rq->cpu;
  
        idle_enter_fair(this_rq);
        /*
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
  
        raw_spin_lock(&this_rq->lock);
  
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
        /*
-        * While browsing the domains, we released the rq lock.
-        * A task could have be enqueued in the meantime
+        * While browsing the domains, we released the rq lock, a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
         */
-       if (this_rq->cfs.h_nr_running && !pulled_task) {
+       if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
-               goto out;
-       }
  
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
                this_rq->next_balance = next_balance;
        }
  
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
  out:
        /* Is there a task of a high priority class? */
 -      if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
 -          ((this_rq->stop && this_rq->stop->on_rq) ||
 -           this_rq->dl.dl_nr_running ||
 -           (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 +      if (this_rq->nr_running != this_rq->cfs.h_nr_running)
                pulled_task = -1;
  
        if (pulled_task) {
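For reference, a minimal sketch (not part of the commit) of the clamp pattern used in the scale_rt_power() hunk above: two clock values read without serialization can be momentarily inconsistent across CPUs, so their difference is taken in a signed 64-bit type and clamped at zero before it is added to the averaging period. example_safe_elapsed() is an illustrative wrapper around just that step.

/* Illustrative sketch only; not part of kernel/sched/fair.c. */
#include <linux/compiler.h>
#include <linux/types.h>

static inline u64 example_safe_elapsed(u64 now, u64 age_stamp)
{
	s64 delta = now - age_stamp;	/* may go negative without serialization */

	if (unlikely(delta < 0))
		delta = 0;
	return delta;
}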
diff --combined mm/huge_memory.c
index dcdb6f9adea194f323a19a4303fff499c5c11294,b4b1feba64724234dee1b66a482c79a0cd3c0f95..d199d2d919467eeddbc82127f0ffbf780e947138
@@@ -827,7 -827,7 +827,7 @@@ int do_huge_pmd_anonymous_page(struct m
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+       if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
@@@ -941,81 -941,6 +941,6 @@@ unlock
        spin_unlock(ptl);
  }
  
- static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
-               struct vm_area_struct *vma, unsigned long address,
-               pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
- {
-       spinlock_t *ptl;
-       pgtable_t pgtable;
-       pmd_t _pmd;
-       struct page *page;
-       int i, ret = 0;
-       unsigned long mmun_start;       /* For mmu_notifiers */
-       unsigned long mmun_end;         /* For mmu_notifiers */
-       page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-       if (!page) {
-               ret |= VM_FAULT_OOM;
-               goto out;
-       }
-       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
-               put_page(page);
-               ret |= VM_FAULT_OOM;
-               goto out;
-       }
-       clear_user_highpage(page, address);
-       __SetPageUptodate(page);
-       mmun_start = haddr;
-       mmun_end   = haddr + HPAGE_PMD_SIZE;
-       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-       ptl = pmd_lock(mm, pmd);
-       if (unlikely(!pmd_same(*pmd, orig_pmd)))
-               goto out_free_page;
-       pmdp_clear_flush(vma, haddr, pmd);
-       /* leave pmd empty until pte is filled */
-       pgtable = pgtable_trans_huge_withdraw(mm, pmd);
-       pmd_populate(mm, &_pmd, pgtable);
-       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
-               pte_t *pte, entry;
-               if (haddr == (address & PAGE_MASK)) {
-                       entry = mk_pte(page, vma->vm_page_prot);
-                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-                       page_add_new_anon_rmap(page, vma, haddr);
-               } else {
-                       entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
-                       entry = pte_mkspecial(entry);
-               }
-               pte = pte_offset_map(&_pmd, haddr);
-               VM_BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, haddr, pte, entry);
-               pte_unmap(pte);
-       }
-       smp_wmb(); /* make pte visible before pmd */
-       pmd_populate(mm, pmd, pgtable);
-       spin_unlock(ptl);
-       put_huge_zero_page();
-       inc_mm_counter(mm, MM_ANONPAGES);
-       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-       ret |= VM_FAULT_WRITE;
- out:
-       return ret;
- out_free_page:
-       spin_unlock(ptl);
-       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-       mem_cgroup_uncharge_page(page);
-       put_page(page);
-       goto out;
- }
  static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
                                               __GFP_OTHER_NODE,
                                               vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
-                            mem_cgroup_newpage_charge(pages[i], mm,
+                            mem_cgroup_charge_anon(pages[i], mm,
                                                       GFP_KERNEL))) {
                        if (pages[i])
                                put_page(pages[i]);
@@@ -1161,8 -1086,8 +1086,8 @@@ alloc
  
        if (unlikely(!new_page)) {
                if (!page) {
-                       ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
-                                       address, pmd, orig_pmd, haddr);
+                       split_huge_page_pmd(vma, address, pmd);
+                       ret |= VM_FAULT_FALLBACK;
                } else {
                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                        pmd, orig_pmd, page, haddr);
                goto out;
        }
  
-       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
@@@ -1611,16 -1536,23 +1536,23 @@@ pmd_t *page_check_address_pmd(struct pa
                              enum page_check_address_pmd_flag flag,
                              spinlock_t **ptl)
  {
+       pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
  
        if (address & ~HPAGE_PMD_MASK)
                return NULL;
  
-       pmd = mm_find_pmd(mm, address);
-       if (!pmd)
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
                return NULL;
+       pud = pud_offset(pgd, address);
+       if (!pud_present(*pud))
+               return NULL;
+       pmd = pmd_offset(pud, address);
        *ptl = pmd_lock(mm, pmd);
-       if (pmd_none(*pmd))
+       if (!pmd_present(*pmd))
                goto unlock;
        if (pmd_page(*pmd) != page)
                goto unlock;
  int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
  {
-       struct mm_struct *mm = vma->vm_mm;
        switch (advice) {
        case MADV_HUGEPAGE:
+ #ifdef CONFIG_S390
+               /*
+                * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
+                * can't handle this properly after s390_enable_sie, so we simply
+                * ignore the madvise to prevent qemu from causing a SIGSEGV.
+                */
+               if (mm_has_pgste(vma->vm_mm))
+                       return 0;
+ #endif
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
                        return -EINVAL;
-               if (mm->def_flags & VM_NOHUGEPAGE)
-                       return -EINVAL;
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
                /*
@@@ -2429,7 -2366,7 +2366,7 @@@ static void collapse_huge_page(struct m
        if (!new_page)
                return;
  
-       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
+       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
                return;
  
        /*
@@@ -2803,7 -2740,7 +2740,7 @@@ static int khugepaged(void *none
        struct mm_slot *mm_slot;
  
        set_freezable();
 -      set_user_nice(current, 19);
 +      set_user_nice(current, MAX_NICE);
  
        while (!kthread_should_stop()) {
                khugepaged_do_scan();
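For reference, a minimal sketch (not part of the commit) of the open-coded page-table walk that the page_check_address_pmd() hunk switches to in place of mm_find_pmd(): each level is tested with its *_present() helper, and the returned PMD entry is re-checked by the caller under the PMD lock, so a huge (or transiently cleared) entry is not rejected prematurely. example_find_pmd() is an illustrative name.

/* Illustrative sketch only; not part of mm/huge_memory.c. */
#include <linux/mm.h>
#include <asm/pgtable.h>

static pmd_t *example_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud;

	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	return pmd_offset(pud, address);	/* caller re-checks under the PMD lock */
}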