Merge branch 'powerpc.cherry-picks' into timers/clocksource
author    Thomas Gleixner <[email protected]>
          Wed, 28 Jul 2010 19:49:22 +0000 (21:49 +0200)
committer Thomas Gleixner <[email protected]>
          Wed, 28 Jul 2010 19:49:22 +0000 (21:49 +0200)
Conflicts:
arch/powerpc/kernel/time.c

Reason: The powerpc next tree contains two commits which conflict with
the timekeeping changes:

8fd63a9e powerpc: Rework VDSO gettimeofday to prevent time going backwards
c1aa687d powerpc: Clean up obsolete code relating to decrementer and timebase

John Stultz identified them and provided the conflict resolution.

Signed-off-by: Thomas Gleixner <[email protected]>
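
For context on the VDSO rework merged here: the new update_vsyscall() in the diff below leans on two fixed-point constants, 4611686018 (~ 2^(20+64-22) / 1e9, valid only while clock->shift == 22, as its XXX comment warns) and 18446744073 (~ 2^64 / 1e9, used to express tv_nsec as a 0.32 binary fraction of a second for stamp_sec_fraction). A minimal userspace sketch checking both constants; the printf harness is illustrative, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 2^(20+64-22) / 1e9: scales the clocksource mult into tb_to_xs,
         * assuming clock->shift == 22 as the diff's comment notes. */
        uint64_t tb_to_xs_scale = (1ULL << 62) / 1000000000ULL;
        printf("tb_to_xs scale: %llu\n",
               (unsigned long long)tb_to_xs_scale);     /* 4611686018 */

        /* 2^64 / 1e9, precomputed because 1ULL << 64 would overflow;
         * multiplying tv_nsec by it and shifting right by 32 yields
         * tv_nsec / 1e9 as a 0.32 fixed-point fraction. */
        uint64_t frac_scale = 18446744073ULL;
        uint32_t frac_sec = (uint32_t)((500000000ULL * frac_scale) >> 32);
        printf("frac_sec(0.5s): %u vs 2^31 = %u\n",
               (unsigned)frac_sec, 1U << 31);
        return 0;
}

Half a second comes out as roughly 2^31, i.e. 0.5 in 0.32 fixed point, which is the representation the VDSO gettimeofday fast path consumes.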
arch/powerpc/kernel/time.c

index e215f76bba1cab1b9c4ca2636abaea518a96be60,ccb8759c8532e6955a0b5c1f05d2e3cdd83fee0b..ce53dfa7130dfc74f551c2360746f206154cf925
@@@ -149,16 -149,6 +149,6 @@@ unsigned long tb_ticks_per_usec = 100; 
  EXPORT_SYMBOL(tb_ticks_per_usec);
  unsigned long tb_ticks_per_sec;
  EXPORT_SYMBOL(tb_ticks_per_sec);      /* for cputime_t conversions */
- u64 tb_to_xs;
- unsigned tb_to_us;
- #define TICKLEN_SCALE NTP_SCALE_SHIFT
- static u64 last_tick_len;     /* units are ns / 2^TICKLEN_SCALE */
- static u64 ticklen_to_xs;     /* 0.64 fraction */
- /* If last_tick_len corresponds to about 1/HZ seconds, then
-    last_tick_len << TICKLEN_SHIFT will be about 2^63. */
- #define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
  
  DEFINE_SPINLOCK(rtc_lock);
  EXPORT_SYMBOL_GPL(rtc_lock);
@@@ -174,7 -164,6 +164,6 @@@ unsigned long ppc_proc_freq
  EXPORT_SYMBOL(ppc_proc_freq);
  unsigned long ppc_tb_freq;
  
- static u64 tb_last_jiffy __cacheline_aligned_in_smp;
  static DEFINE_PER_CPU(u64, last_jiffy);
  
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@@ -446,7 -435,6 +435,6 @@@ EXPORT_SYMBOL(profile_pc)
  
  static int __init iSeries_tb_recal(void)
  {
-       struct div_result divres;
        unsigned long titan, tb;
  
        /* Make sure we only run on iSeries */
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec   = new_tb_ticks_per_sec;
                                calc_cputime_factors();
-                               div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-                               tb_to_xs = divres.result_low;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-                               vdso_data->tb_to_xs = tb_to_xs;
                                setup_cputime_one_jiffy();
                        }
                        else {
@@@ -643,27 -628,9 +628,9 @@@ void timer_interrupt(struct pt_regs * r
        trace_timer_interrupt_exit(regs);
  }
  
- void wakeup_decrementer(void)
- {
-       unsigned long ticks;
-       /*
-        * The timebase gets saved on sleep and restored on wakeup,
-        * so all we need to do is to reset the decrementer.
-        */
-       ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-       if (ticks < tb_ticks_per_jiffy)
-               ticks = tb_ticks_per_jiffy - ticks;
-       else
-               ticks = 1;
-       set_dec(ticks);
- }
  #ifdef CONFIG_SUSPEND
- void generic_suspend_disable_irqs(void)
+ static void generic_suspend_disable_irqs(void)
  {
-       preempt_disable();
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */
        set_dec(0x7fffffff);
  }
  
- void generic_suspend_enable_irqs(void)
+ static void generic_suspend_enable_irqs(void)
  {
-       wakeup_decrementer();
        local_irq_enable();
-       preempt_enable();
  }
  
  /* Overrides the weak version in kernel/power/main.c */
@@@ -698,23 -662,6 +662,6 @@@ void arch_suspend_enable_irqs(void
  }
  #endif
  
- #ifdef CONFIG_SMP
- void __init smp_space_timers(unsigned int max_cpus)
- {
-       int i;
-       u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-       /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-       previous_tb -= tb_ticks_per_jiffy;
-       for_each_possible_cpu(i) {
-               if (i == boot_cpuid)
-                       continue;
-               per_cpu(last_jiffy, i) = previous_tb;
-       }
- }
- #endif
  /*
   * Scheduler clock - returns current time in nanosec units.
   *
@@@ -849,25 -796,10 +796,30 @@@ static cycle_t timebase_read(struct clo
        return (cycle_t)get_tb();
  }
  
 -static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
 -                             u64 new_tb_to_xs, struct timespec *now,
 -                             u32 frac_sec)
 +void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 +                      struct clocksource *clock, u32 mult)
  {
 +      u64 new_tb_to_xs, new_stamp_xsec;
++      u32 frac_sec;
 +
 +      if (clock != &clocksource_timebase)
 +              return;
 +
 +      /* Make userspace gettimeofday spin until we're done. */
 +      ++vdso_data->tb_update_count;
 +      smp_mb();
 +
 +      /* XXX this assumes clock->shift == 22 */
 +      /* 4611686018 ~= 2^(20+64-22) / 1e9 */
 +      new_tb_to_xs = (u64) mult * 4611686018ULL;
 +      new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 +      do_div(new_stamp_xsec, 1000000000);
 +      new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 +
++      BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
++      /* this is tv_nsec / 1e9 as a 0.32 fraction */
++      frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
++
        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
 -      vdso_data->tb_orig_stamp = new_tb_stamp;
 +      vdso_data->tb_orig_stamp = clock->cycle_last;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
 -      vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
 -      vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
 -      vdso_data->stamp_xtime = *now;
 +      vdso_data->wtom_clock_sec = wtm->tv_sec;
 +      vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 +      vdso_data->stamp_xtime = *wall_time;
+       vdso_data->stamp_sec_fraction = frac_sec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
  }
  
 -void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 -                   u32 mult)
 -{
 -      u64 t2x, stamp_xsec;
 -      u32 frac_sec;
 -
 -      if (clock != &clocksource_timebase)
 -              return;
 -
 -      /* Make userspace gettimeofday spin until we're done. */
 -      ++vdso_data->tb_update_count;
 -      smp_mb();
 -
 -      /* XXX this assumes clock->shift == 22 */
 -      /* 4611686018 ~= 2^(20+64-22) / 1e9 */
 -      t2x = (u64) mult * 4611686018ULL;
 -      stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 -      do_div(stamp_xsec, 1000000000);
 -      stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 -
 -      BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
 -      /* this is tv_nsec / 1e9 as a 0.32 fraction */
 -      frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
 -      update_gtod(clock->cycle_last, stamp_xsec, t2x, wall_time, frac_sec);
 -}
 -
  void update_vsyscall_tz(void)
  {
        /* Make userspace gettimeofday spin until we're done. */
@@@ -1002,15 -961,13 +955,13 @@@ void secondary_cpu_time_init(void
  /* This function is only called on the boot processor */
  void __init time_init(void)
  {
-       unsigned long flags;
        struct div_result res;
-       u64 scale, x;
+       u64 scale;
        unsigned shift;
  
        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
-               tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-               tb_last_jiffy = get_tb();
        }
  
        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
-       tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();
        setup_cputime_one_jiffy();
  
-       /*
-        * Calculate the length of each tick in ns.  It will not be
-        * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-        * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-        * rounded up.
-        */
-       x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-       do_div(x, ppc_tb_freq);
-       tick_nsec = x;
-       last_tick_len = x << TICKLEN_SCALE;
-       /*
-        * Compute ticklen_to_xs, which is a factor which gets multiplied
-        * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-        * It is computed as:
-        * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-        * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-        * which turns out to be N = 51 - SHIFT_HZ.
-        * This gives the result as a 0.64 fixed-point fraction.
-        * That value is reduced by an offset amounting to 1 xsec per
-        * 2^31 timebase ticks to avoid problems with time going backwards
-        * by 1 xsec when we do timer_recalc_offset due to losing the
-        * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
-        * since there are 2^20 xsec in a second.
-        */
-       div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-                    tb_ticks_per_jiffy << SHIFT_HZ, &res);
-       div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-       ticklen_to_xs = res.result_low;
-       /* Compute tb_to_xs from tick_nsec */
-       tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();
  
-       write_seqlock_irqsave(&xtime_lock, flags);
        /* If platform provided a timezone (pmac), we correct the time */
          if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
          }
  
-       vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-       vdso_data->stamp_xsec = (u64) get_seconds() * XSEC_PER_SEC;
-       vdso_data->tb_to_xs = tb_to_xs;
-       write_sequnlock_irqrestore(&xtime_lock, flags);
  
        /* Start the decrementer on CPUs that have manual control
         * such as BookE
@@@ -1190,39 -1105,6 +1099,6 @@@ void to_tm(int tim, struct rtc_time * t
        GregorianDay(tm);
  }
  
- /* Auxiliary function to compute scaling factors */
- /* Actually the choice of a timebase running at 1/4 the of the bus
-  * frequency giving resolution of a few tens of nanoseconds is quite nice.
-  * It makes this computation very precise (27-28 bits typically) which
-  * is optimistic considering the stability of most processor clock
-  * oscillators and the precision with which the timebase frequency
-  * is measured but does not harm.
-  */
- unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
- {
-         unsigned mlt=0, tmp, err;
-         /* No concern for performance, it's done once: use a stupid
-          * but safe and compact method to find the multiplier.
-          */
-   
-         for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-                 if (mulhwu(inscale, mlt|tmp) < outscale)
-                       mlt |= tmp;
-         }
-   
-         /* We might still be off by 1 for the best approximation.
-          * A side effect of this is that if outscale is too large
-          * the returned value will be zero.
-          * Many corner cases have been checked and seem to work,
-          * some might have been forgotten in the test however.
-          */
-   
-         err = inscale * (mlt+1);
-         if (err <= inscale/2)
-               mlt++;
-         return mlt;
- }
  /*
   * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
  */
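
One of the helpers removed above, mulhwu_scale_factor(), computes the 32-bit multiplier mlt for which mulhwu(inscale, mlt) = (inscale * mlt) >> 32 best approximates outscale, i.e. mlt ~= outscale * 2^32 / inscale, by trying bits from the most significant downward. A hedged userspace re-creation of the idea (scale_factor() and the example frequency are illustrative, not the kernel's code):

#include <stdio.h>
#include <stdint.h>

/* High 32 bits of a 32x32 -> 64-bit multiply, like PowerPC's mulhwu insn. */
static uint32_t mulhwu(uint32_t a, uint32_t b)
{
        return (uint32_t)(((uint64_t)a * b) >> 32);
}

/* Greedy bit-by-bit search for mlt with mulhwu(inscale, mlt) ~= outscale,
 * mirroring the structure of the removed kernel helper. */
static uint32_t scale_factor(uint32_t inscale, uint32_t outscale)
{
        uint32_t mlt = 0, tmp;

        for (tmp = 1U << 31; tmp != 0; tmp >>= 1)
                if (mulhwu(inscale, mlt | tmp) < outscale)
                        mlt |= tmp;
        return mlt;
}

int main(void)
{
        /* Example: timebase ticks at 33,333,333 Hz -> microseconds. */
        uint32_t mlt = scale_factor(33333333, 1000000);

        printf("mlt = %u, mulhwu(33333333, mlt) = %u (want ~1000000)\n",
               (unsigned)mlt, (unsigned)mulhwu(33333333, mlt));
        return 0;
}

As the removed comment notes, the helper ran once at boot, so the 32-iteration search was deliberately simple rather than fast.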