Merge commit '8700c95adb03' into timers/nohz
author Frederic Weisbecker <fweisbec@gmail.com>
Thu, 2 May 2013 15:37:49 +0000 (17:37 +0200)
committer Frederic Weisbecker <fweisbec@gmail.com>
Thu, 2 May 2013 15:54:19 +0000 (17:54 +0200)
The full dynticks tree needs the latest RCU and sched
upstream updates in order to fix some dependencies.

Merge a common upstream merge point that has these
updates.

Conflicts:
include/linux/perf_event.h
kernel/rcutree.h
kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
17 files changed:
Documentation/RCU/stallwarn.txt
Documentation/kernel-parameters.txt
include/linux/perf_event.h
include/linux/rcupdate.h
include/linux/sched.h
init/Kconfig
init/main.c
kernel/events/core.c
kernel/hrtimer.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/softirq.c
kernel/time/tick-broadcast.c

Simple merge
Simple merge
diff --cc include/linux/perf_event.h
@@@ -799,12 -788,12 +788,18 @@@ static inline int __perf_event_disable(
  static inline void perf_event_task_tick(void)                         { }
  #endif
  
 +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
 +extern bool perf_event_can_stop_tick(void);
 +#else
 +static inline bool perf_event_can_stop_tick(void)                     { return true; }
 +#endif
 +
+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+ extern void perf_restore_debug_store(void);
+ #else
+ static inline void perf_restore_debug_store(void)                     { }
+ #endif
+
  #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
  
  /*
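
The perf_event_can_stop_tick() hunk above follows the usual kernel stub pattern: an extern declaration when both CONFIG_PERF_EVENTS and CONFIG_NO_HZ_FULL are enabled, and a static inline returning true otherwise, so callers need no #ifdefs of their own. A minimal sketch of how such a predicate might be consulted by a tick-stop decision path (the caller below is illustrative only, not code from this merge):

    #include <linux/perf_event.h>

    /* Illustrative sketch: poll subsystem predicates before stopping the
     * periodic tick on a full-dynticks CPU.  With perf configured out,
     * the static-inline stub makes this check a constant 'true'. */
    static bool example_can_stop_tick(void)
    {
            if (!perf_event_can_stop_tick())
                    return false;   /* perf still needs the periodic tick */
            /* ... other subsystems would be polled the same way ... */
            return true;
    }
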
Simple merge
Simple merge
diff --cc init/Kconfig
@@@ -580,16 -576,19 +576,19 @@@ config RCU_FANOUT_EXAC
  
  config RCU_FAST_NO_HZ
        bool "Accelerate last non-dyntick-idle CPU's grace periods"
 -      depends on NO_HZ && SMP
 +      depends on NO_HZ_COMMON && SMP
        default n
        help
-         This option causes RCU to attempt to accelerate grace periods in
-         order to allow CPUs to enter dynticks-idle state more quickly.
-         On the other hand, this option increases the overhead of the
-         dynticks-idle checking, thus degrading scheduling latency.
+         This option permits CPUs to enter dynticks-idle state even if
+         they have RCU callbacks queued, and prevents RCU from waking
+         these CPUs up more than roughly once every four jiffies (by
+         default, you can adjust this using the rcutree.rcu_idle_gp_delay
+         parameter), thus improving energy efficiency.  On the other
+         hand, this option increases the duration of RCU grace periods,
+         for example, slowing down synchronize_rcu().
  
-         Say Y if energy efficiency is critically important, and you don't
-               care about real-time response.
+         Say Y if energy efficiency is critically important, and you
+               don't care about increased grace-period durations.
  
          Say N if you are unsure.
  
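The updated help text refers to the rcutree.rcu_idle_gp_delay boot/module parameter. As a hedged illustration (the value is arbitrary, shown only for syntax), the roughly-four-jiffy wake-up interval could be relaxed on the kernel command line of a CONFIG_RCU_FAST_NO_HZ=y build with:

    rcutree.rcu_idle_gp_delay=8
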
diff --cc init/main.c
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/rcutree.h
@@@ -529,16 -526,18 +526,18 @@@ static void print_cpu_stall_info(struc
  static void print_cpu_stall_info_end(void);
  static void zero_cpu_stall_ticks(struct rcu_data *rdp);
  static void increment_cpu_stall_ticks(void);
 -static bool is_nocb_cpu(int cpu);
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp);
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
+ static void rcu_init_one_nocb(struct rcu_node *rnp);
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy);
  static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
                                      struct rcu_data *rdp);
- static bool nocb_cpu_expendable(int cpu);
  static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
  static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
- static void init_nocb_callback_list(struct rcu_data *rdp);
- static void __init rcu_init_nocb(void);
 +static void rcu_kick_nohz_cpu(int cpu);
+ static bool init_nocb_callback_list(struct rcu_data *rdp);
  
  #endif /* #ifndef RCU_TREE_NONCORE */
  
diff --cc kernel/rcutree_plugin.h
@@@ -2166,8 -2010,49 +2011,49 @@@ static int __init parse_rcu_nocb_poll(c
  }
  early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
  
+ /*
+  * Do any no-CBs CPUs need another grace period?
+  *
+  * Interrupts must be disabled.  If the caller does not hold the root
+  * rnp_node structure's ->lock, the results are advisory only.
+  */
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+ {
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
+ }
+
+ /*
+  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+  * grace period.
+  */
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+ {
+       wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ }
+
+ /*
+  * Set the root rcu_node structure's ->need_future_gp field
+  * based on the sum of those of all rcu_node structures.  This does
+  * double-count the root rcu_node structure's requests, but this
+  * is necessary to handle the possibility of a rcu_nocb_kthread()
+  * having awakened during the time that the rcu_node structures
+  * were being updated for the end of the previous grace period.
+  */
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+ {
+       rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
+ }
+
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+       init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+       init_waitqueue_head(&rnp->nocb_gp_wq[1]);
+ }
+
  /* Is the specified CPU a no-CBs CPU? */
 -static bool is_nocb_cpu(int cpu)
 +bool rcu_is_nocb_cpu(int cpu)
  {
        if (have_rcu_nocb_mask)
                return cpumask_test_cpu(cpu, rcu_nocb_mask);
@@@ -2225,9 -2110,16 +2111,16 @@@ static bool __call_rcu_nocb(struct rcu_
                            bool lazy)
  {
  
 -      if (!is_nocb_cpu(rdp->cpu))
 +      if (!rcu_is_nocb_cpu(rdp->cpu))
                return 0;
        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+       if (__is_kfree_rcu_offset((unsigned long)rhp->func))
+               trace_rcu_kfree_callback(rdp->rsp->name, rhp,
+                                        (unsigned long)rhp->func,
+                                        rdp->qlen_lazy, rdp->qlen);
+       else
+               trace_rcu_callback(rdp->rsp->name, rhp,
+                                  rdp->qlen_lazy, rdp->qlen);
        return 1;
  }
  
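
The tracing added above reuses the existing rcu:rcu_callback and rcu:rcu_kfree_callback tracepoints for callbacks queued on no-CBs CPUs. Assuming tracefs/debugfs is mounted at the usual location, those events could be observed with, for example:

    echo 1 > /sys/kernel/debug/tracing/events/rcu/rcu_callback/enable
    cat /sys/kernel/debug/tracing/trace_pipe
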
@@@ -2448,22 -2282,35 +2283,30 @@@ static bool init_nocb_callback_list(str
  {
        if (rcu_nocb_mask == NULL ||
            !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
-               return;
+               return false;
        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
+       return true;
+ }
+
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+ {
+       return 0;
  }
  
- /* Initialize the ->call_remote fields in the rcu_state structures. */
- static void __init rcu_init_nocb(void)
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  {
- #ifdef CONFIG_PREEMPT_RCU
-       rcu_preempt_state.call_remote = call_rcu_preempt_remote;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
-       rcu_bh_state.call_remote = call_rcu_bh_remote;
-       rcu_sched_state.call_remote = call_rcu_sched_remote;
  }
  
- #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+ {
+ }
+
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+ }
  
 -static bool is_nocb_cpu(int cpu)
 -{
 -      return false;
 -}
 -
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy)
  {
Simple merge
Simple merge
diff --cc kernel/sched/sched.h
@@@ -5,9 -5,9 +5,10 @@@
  #include <linux/mutex.h>
  #include <linux/spinlock.h>
  #include <linux/stop_machine.h>
 +#include <linux/tick.h>
  
  #include "cpupri.h"
+ #include "cpuacct.h"
  
  extern __read_mostly int scheduler_running;
  
diff --cc kernel/softirq.c
@@@ -323,33 -323,12 +323,25 @@@ void irq_enter(void
  
  static inline void invoke_softirq(void)
  {
-       if (!force_irqthreads) {
- #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+       if (!force_irqthreads)
                __do_softirq();
- #else
-               do_softirq();
- #endif
-       } else {
-               __local_bh_disable((unsigned long)__builtin_return_address(0),
-                               SOFTIRQ_OFFSET);
+       else
                wakeup_softirqd();
-               __local_bh_enable(SOFTIRQ_OFFSET);
-       }
  }
  
 +static inline void tick_irq_exit(void)
 +{
 +#ifdef CONFIG_NO_HZ_COMMON
 +      int cpu = smp_processor_id();
 +
 +      /* Make sure that timer wheel updates are propagated */
 +      if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
 +              if (!in_interrupt())
 +                      tick_nohz_irq_exit();
 +      }
 +#endif
 +}
 +
  /*
   * Exit an interrupt context. Process softirqs if needed and possible:
   */
@@@ -361,9 -346,12 +359,8 @@@ void irq_exit(void
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
  
 -#ifdef CONFIG_NO_HZ
 -      /* Make sure that timer wheel updates are propagated */
 -      if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 -              tick_nohz_irq_exit();
 -#endif
 +      tick_irq_exit();
        rcu_irq_exit();
-       sched_preempt_enable_no_resched();
  }
  
  /*
Simple merge