Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 "This update brings along:

   - Two fixes for long-standing bugs in the hrtimer code, one which
     prevents remote enqueuing and the other which prevents arbitrary
     delays after an interrupt hang was detected

   - A fix in the timer wheel which prevents math overflow

   - A fix for a long-standing issue with the architected ARM timer
     related to the C3STOP mechanism.

   - A trivial compile fix for nspire SoC clocksource"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timer: Prevent overflow in apply_slack
  hrtimer: Prevent remote enqueue of leftmost timers
  hrtimer: Prevent all reprogramming if hang detected
  clocksource: nspire: Fix compiler warning
  clocksource: arch_arm_timer: Fix age-old arch timer C3STOP detection issue
Linus Torvalds 2014-05-03 08:31:45 -07:00
commit 98facf0e1e
5 changed files with 37 additions and 3 deletions

@@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
 - clock-frequency : The frequency of the main counter, in Hz. Optional.
+- always-on : a boolean property. If present, the timer is powered through an
+  always-on power domain, therefore it never loses context.
 Example:
 	timer {
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
 static struct clock_event_device __percpu *arch_timer_evt;
 static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 /*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
 	clk->features = CLOCK_EVT_FEAT_ONESHOT;
 	if (type == ARCH_CP15_TIMER) {
-		clk->features |= CLOCK_EVT_FEAT_C3STOP;
+		if (arch_timer_c3stop)
+			clk->features |= CLOCK_EVT_FEAT_C3STOP;
 		clk->name = "arch_sys_timer";
 		clk->rating = 450;
 		clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
 		}
 	}
+	arch_timer_c3stop = !of_property_read_bool(np, "always-on");
 	arch_timer_register();
 	arch_timer_common_init();
 }
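The net effect of the arm_arch_timer change: the C3STOP feature, which tells the clockevents core that the timer may stop in deep idle states, is now advertised only when the device tree does not carry the "always-on" property. Below is a minimal user-space sketch of that gating logic; the names (timer_c3stop, timer_init, timer_setup, FEAT_*) and the plain bool argument standing in for the of_property_read_bool() result are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the clockevent feature bits used by the driver. */
#define FEAT_ONESHOT	0x1
#define FEAT_C3STOP	0x2

static bool timer_c3stop;	/* models arch_timer_c3stop */

/* Models arch_timer_init(): the DT "always-on" boolean inverts into c3stop. */
static void timer_init(bool dt_always_on)
{
	timer_c3stop = !dt_always_on;
}

/* Models __arch_timer_setup(): the C3STOP feature is now conditional. */
static unsigned int timer_setup(void)
{
	unsigned int features = FEAT_ONESHOT;

	if (timer_c3stop)
		features |= FEAT_C3STOP;
	return features;
}

int main(void)
{
	timer_init(true);	/* always-on timer: keeps counting in deep idle */
	printf("always-on     -> features %#x\n", timer_setup());

	timer_init(false);	/* default: timer may stop in deep C-states */
	printf("not always-on -> features %#x\n", timer_setup());
	return 0;
}

Running it prints features 0x1 for an always-on timer and 0x3 otherwise, mirroring how __arch_timer_setup() now decides whether to set CLOCK_EVT_FEAT_C3STOP.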

@@ -212,4 +212,9 @@ static int __init zevio_timer_add(struct device_node *node)
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+	BUG_ON(zevio_timer_add(node));
+}
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
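The nspire clocksource warning was a function-pointer type mismatch: zevio_timer_add() returns int, while the CLOCKSOURCE_OF_DECLARE() table expects a void init function taking a struct device_node pointer, so the patch adds a void wrapper and turns a failure into BUG_ON(). A small stand-alone C sketch of the same pattern; the names init_fn, my_add, my_init and hook are made up for illustration.

#include <stdio.h>

struct device_node;	/* opaque, as in the kernel */

/* The registration table stores init hooks that return nothing. */
typedef void (*init_fn)(struct device_node *);

/* An add routine that reports errors, like zevio_timer_add(). */
static int my_add(struct device_node *node)
{
	(void)node;
	return 0;	/* 0 on success, negative errno on failure */
}

/* Initializing the table entry with my_add directly, e.g.
 *	static init_fn hook = my_add;
 * draws an incompatible-pointer-type warning because the return
 * types differ.  The patch's approach is a thin void wrapper that
 * checks the result (the kernel uses BUG_ON()). */
static void my_init(struct device_node *node)
{
	if (my_add(node))
		fprintf(stderr, "timer init failed\n");
}

static init_fn hook = my_init;	/* clean, no warning */

int main(void)
{
	hook(NULL);
	return 0;
}

Building with -Wall only complains about the direct my_add assignment mentioned in the comment; the wrapper version compiles cleanly.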

@@ -234,6 +234,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 	cpu_base->expires_next.tv64 = expires_next.tv64;
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * leave the hang delay active in the hardware. We want the
+	 * system to make progress. That also prevents the following
+	 * scenario:
+	 * T1 expires 50ms from now
+	 * T2 expires 5s from now
+	 *
+	 * T1 is removed, so this code is called and would reprogram
+	 * the hardware to 5s from now. Any hrtimer_start after that
+	 * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectively block all timers until the T2 event
+	 * fires.
+	 */
+	if (cpu_base->hang_detected)
+		return;
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }

@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 	bit = find_last_bit(&mask, BITS_PER_LONG);
-	mask = (1 << bit) - 1;
+	mask = (1UL << bit) - 1;
 	expires_limit = expires_limit & ~(mask);
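The apply_slack() fix matters on 64-bit systems: expires and mask are unsigned long, so find_last_bit() can return a bit position well past 31, and shifting the plain int literal 1 that far overflows and produces a bogus rounding mask. A user-space sketch of the difference; the default bit value of 40 is just an illustrative input, and the result of the old expression is formally undefined, so it varies by compiler and architecture.

/* Build with: gcc -O2 -o slackdemo slackdemo.c && ./slackdemo 40 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* Bit position as apply_slack() might compute it for a timeout far
	 * in the future on a 64-bit system; anything >= 31 shows the bug. */
	unsigned int bit = (argc > 1) ? (unsigned int)atoi(argv[1]) : 40;

	/* Old expression: the literal 1 is a 32-bit int, so the shift
	 * overflows (undefined behaviour) once bit reaches 31, and the
	 * resulting mask is garbage on 64-bit machines. */
	unsigned long bad = (1 << bit) - 1;

	/* Patched expression: 1UL is as wide as unsigned long, so the
	 * shift stays in range for every bit find_last_bit() can return. */
	unsigned long good = (1UL << bit) - 1;

	printf("bit=%u  old mask=%#lx  new mask=%#lx\n", bit, bad, good);
	return 0;
}

On a typical x86-64 build the old form wraps the shift count and yields a mask of 0xff here, while the patched form gives the intended 0xffffffffff, so the expiry is rounded to the granularity apply_slack() meant to use.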