Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer type cleanups from Thomas Gleixner:
 "This series does a tree wide cleanup of types related to
  timers/timekeeping.

   - Get rid of cycles_t and use a plain u64. The type is not really
     helpful and caused more confusion than clarity

   - Get rid of the ktime union. The union has become useless as we use
     the scalar nanoseconds storage unconditionally now. The 32bit
     timespec alike storage got removed due to the Y2038 limitations
     some time ago. That leaves the odd union access around for no
     reason. Clean it up.

  Both changes have been done with coccinelle and a small amount of
  manual mopping up"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ktime: Get rid of ktime_equal()
  ktime: Cleanup ktime_set() usage
  ktime: Get rid of the union
  clocksource: Use a plain u64 instead of cycle_t
commit 3ddc76dfc7
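In practical terms, the two conversions in the diff below reduce to scalar operations at each call site. The following is an illustrative sketch only, with simplified stand-in typedefs rather than the kernel's actual headers:

/*
 * Illustrative sketch, not kernel code: ktime_t shown as a bare s64
 * and u64 as a plain 64-bit integer. With the union gone,
 * ktime_set(0, n) and .tv64 accesses collapse into ordinary scalar
 * assignments and comparisons, and clocksource ->read() hooks return
 * a plain u64 (cycle_t was always a typedef of u64 underneath).
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;	/* after the cleanup: no union wrapper */
typedef uint64_t u64;		/* what cycle_t aliased before removal */

static ktime_t pick_timeout(int64_t nsecs, ktime_t a, ktime_t b)
{
	ktime_t kt = nsecs;	/* was: kt = ktime_set(0, nsecs); */

	if (a > b)		/* was: if (a.tv64 > b.tv64) */
		kt = a;
	return kt;
}

int main(void)
{
	printf("%lld\n", (long long)pick_timeout(1000, 5, 3));
	return 0;
}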
@@ -133,7 +133,7 @@ init_rtc_clockevent(void)
  * The QEMU clock as a clocksource primitive.
  */

-static cycle_t
+static u64
 qemu_cs_read(struct clocksource *cs)
 {
 	return qemu_get_vmtime();
@@ -260,7 +260,7 @@ common_init_rtc(void)
  * use this method when WTINT is in use.
  */

-static cycle_t read_rpcc(struct clocksource *cs)
+static u64 read_rpcc(struct clocksource *cs)
 {
 	return rpcc();
 }
@@ -268,7 +268,7 @@ static void __init timer_init(void)
 /*
  * clocksource
  */
-static cycle_t read_cycles(struct clocksource *cs)
+static u64 read_cycles(struct clocksource *cs)
 {
 	struct timer_s *t = &timers[TID_CLOCKSOURCE];

@@ -59,13 +59,13 @@ static u64 notrace ep93xx_read_sched_clock(void)
 	return ret;
 }

-cycle_t ep93xx_clocksource_read(struct clocksource *c)
+u64 ep93xx_clocksource_read(struct clocksource *c)
 {
 	u64 ret;

 	ret = readl(EP93XX_TIMER4_VALUE_LOW);
 	ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
-	return (cycle_t) ret;
+	return (u64) ret;
 }

 static int ep93xx_clkevt_set_next_event(unsigned long next,
@@ -19,7 +19,7 @@

 #include "common.h"

-static cycle_t cksrc_dc21285_read(struct clocksource *cs)
+static u64 cksrc_dc21285_read(struct clocksource *cs)
 {
 	return cs->mask - *CSR_TIMER2_VALUE;
 }
@@ -493,7 +493,7 @@ static u64 notrace ixp4xx_read_sched_clock(void)
  * clocksource
  */

-static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
 {
 	return *IXP4XX_OSTS;
 }
@@ -144,7 +144,7 @@ static struct clock_event_device ckevt = {
 	.set_state_oneshot = timer_set_shutdown,
 };

-static cycle_t clksrc_read(struct clocksource *cs)
+static u64 clksrc_read(struct clocksource *cs)
 {
 	return timer_read();
 }
@@ -369,9 +369,9 @@ static bool use_gptimer_clksrc __initdata;
 /*
  * clocksource
  */
-static cycle_t clocksource_read_cycles(struct clocksource *cs)
+static u64 clocksource_read_cycles(struct clocksource *cs)
 {
-	return (cycle_t)__omap_dm_timer_read_counter(&clksrc,
+	return (u64)__omap_dm_timer_read_counter(&clksrc,
 			OMAP_TIMER_NONPOSTED);
 }

@@ -38,7 +38,7 @@
 /*
  * IOP clocksource (free-running timer 1).
  */
-static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
+static u64 notrace iop_clocksource_read(struct clocksource *unused)
 {
 	return 0xffffffffu - read_tcr1();
 }
@@ -20,9 +20,9 @@

 static bool disable_cpu_idle_poll;

-static cycle_t read_cycle_count(struct clocksource *cs)
+static u64 read_cycle_count(struct clocksource *cs)
 {
-	return (cycle_t)sysreg_read(COUNT);
+	return (u64)sysreg_read(COUNT);
 }

 /*
@@ -26,7 +26,7 @@

 #if defined(CONFIG_CYCLES_CLOCKSOURCE)

-static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
+static notrace u64 bfin_read_cycles(struct clocksource *cs)
 {
 #ifdef CONFIG_CPU_FREQ
 	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
@@ -80,7 +80,7 @@ void __init setup_gptimer0(void)
 	enable_gptimers(TIMER0bit);
 }

-static cycle_t bfin_read_gptimer0(struct clocksource *cs)
+static u64 bfin_read_gptimer0(struct clocksource *cs)
 {
 	return bfin_read_TIMER0_COUNTER();
 }
@@ -26,7 +26,7 @@
 static u32 sched_clock_multiplier;
 #define SCHED_CLOCK_SHIFT 16

-static cycle_t tsc_read(struct clocksource *cs)
+static u64 tsc_read(struct clocksource *cs)
 {
 	return get_cycles();
 }
@@ -72,9 +72,9 @@ struct adsp_hw_timer_struct {
 /* Look for "TCX0" for related constants. */
 static __iomem struct adsp_hw_timer_struct *rtos_timer;

-static cycle_t timer_get_cycles(struct clocksource *cs)
+static u64 timer_get_cycles(struct clocksource *cs)
 {
-	return (cycle_t) __vmgettime();
+	return (u64) __vmgettime();
 }

 static struct clocksource hexagon_clocksource = {
@@ -21,9 +21,9 @@ void __init cyclone_setup(void)

 static void __iomem *cyclone_mc;

-static cycle_t read_cyclone(struct clocksource *cs)
+static u64 read_cyclone(struct clocksource *cs)
 {
-	return (cycle_t)readq((void __iomem *)cyclone_mc);
+	return (u64)readq((void __iomem *)cyclone_mc);
 }

 static struct clocksource clocksource_cyclone = {
@@ -9,15 +9,15 @@ struct fsyscall_gtod_data_t {
 	seqcount_t seq;
 	struct timespec wall_time;
 	struct timespec monotonic_time;
-	cycle_t clk_mask;
+	u64 clk_mask;
 	u32 clk_mult;
 	u32 clk_shift;
 	void *clk_fsys_mmio;
-	cycle_t clk_cycle_last;
+	u64 clk_cycle_last;
 } ____cacheline_aligned;

 struct itc_jitter_data_t {
 	int itc_jitter;
-	cycle_t itc_lastcycle;
+	u64 itc_lastcycle;
 } ____cacheline_aligned;

@@ -31,7 +31,7 @@

 #include "fsyscall_gtod_data.h"

-static cycle_t itc_get_cycles(struct clocksource *cs);
+static u64 itc_get_cycles(struct clocksource *cs);

 struct fsyscall_gtod_data_t fsyscall_gtod_data;

@@ -323,7 +323,7 @@ void ia64_init_itm(void)
 	}
 }

-static cycle_t itc_get_cycles(struct clocksource *cs)
+static u64 itc_get_cycles(struct clocksource *cs)
 {
 	unsigned long lcycle, now, ret;

@@ -397,7 +397,7 @@ void update_vsyscall_tz(void)
 }

 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult, cycle_t cycle_last)
+			 struct clocksource *c, u32 mult, u64 cycle_last)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);

@@ -22,9 +22,9 @@

 extern unsigned long sn_rtc_cycles_per_second;

-static cycle_t read_sn2(struct clocksource *cs)
+static u64 read_sn2(struct clocksource *cs)
 {
-	return (cycle_t)readq(RTC_COUNTER_ADDR);
+	return (u64)readq(RTC_COUNTER_ADDR);
 }

 static struct clocksource clocksource_sn2 = {
@@ -76,7 +76,7 @@ static struct irqaction m68328_timer_irq = {

 /***************************************************************************/

-static cycle_t m68328_read_clk(struct clocksource *cs)
+static u64 m68328_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles;

@@ -34,7 +34,7 @@
 #define DMA_DTMR_CLK_DIV_16 (2 << 1)
 #define DMA_DTMR_ENABLE (1 << 0)

-static cycle_t cf_dt_get_cycles(struct clocksource *cs)
+static u64 cf_dt_get_cycles(struct clocksource *cs)
 {
 	return __raw_readl(DTCN0);
 }
@@ -118,7 +118,7 @@ static struct irqaction pit_irq = {

 /***************************************************************************/

-static cycle_t pit_read_clk(struct clocksource *cs)
+static u64 pit_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles;

@@ -97,7 +97,7 @@ static struct irqaction mcfslt_timer_irq = {
 	.handler = mcfslt_tick,
 };

-static cycle_t mcfslt_read_clk(struct clocksource *cs)
+static u64 mcfslt_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles, scnt;

@@ -89,7 +89,7 @@ static struct irqaction mcftmr_timer_irq = {

 /***************************************************************************/

-static cycle_t mcftmr_read_clk(struct clocksource *cs)
+static u64 mcftmr_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles;

@@ -190,17 +190,17 @@ static u64 xilinx_clock_read(void)
 	return read_fn(timer_baseaddr + TCR1);
 }

-static cycle_t xilinx_read(struct clocksource *cs)
+static u64 xilinx_read(struct clocksource *cs)
 {
 	/* reading actual value of timer 1 */
-	return (cycle_t)xilinx_clock_read();
+	return (u64)xilinx_clock_read();
 }

 static struct timecounter xilinx_tc = {
 	.cc = NULL,
 };

-static cycle_t xilinx_cc_read(const struct cyclecounter *cc)
+static u64 xilinx_cc_read(const struct cyclecounter *cc)
 {
 	return xilinx_read(NULL);
 }
@@ -44,7 +44,7 @@
 /* 32kHz clock enabled and detected */
 #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)

-static cycle_t au1x_counter1_read(struct clocksource *cs)
+static u64 au1x_counter1_read(struct clocksource *cs)
 {
 	return alchemy_rdsys(AU1000_SYS_RTCREAD);
 }
@@ -98,7 +98,7 @@ void octeon_init_cvmcount(void)
 	local_irq_restore(flags);
 }

-static cycle_t octeon_cvmcount_read(struct clocksource *cs)
+static u64 octeon_cvmcount_read(struct clocksource *cs)
 {
 	return read_c0_cvmcount();
 }
@@ -34,7 +34,7 @@

 static uint16_t jz4740_jiffies_per_tick;

-static cycle_t jz4740_clocksource_read(struct clocksource *cs)
+static u64 jz4740_clocksource_read(struct clocksource *cs)
 {
 	return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
 }
@@ -27,7 +27,7 @@ struct txx9_clocksource {
 	struct txx9_tmr_reg __iomem *tmrptr;
 };

-static cycle_t txx9_cs_read(struct clocksource *cs)
+static u64 txx9_cs_read(struct clocksource *cs)
 {
 	struct txx9_clocksource *txx9_cs =
 		container_of(cs, struct txx9_clocksource, cs);

@@ -25,9 +25,9 @@

 #include <asm/sibyte/sb1250.h>

-static cycle_t bcm1480_hpt_read(struct clocksource *cs)
+static u64 bcm1480_hpt_read(struct clocksource *cs)
 {
-	return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+	return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
 }

 struct clocksource bcm1480_clocksource = {
@@ -22,7 +22,7 @@
 #include <asm/dec/ioasic.h>
 #include <asm/dec/ioasic_addrs.h>

-static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
+static u64 dec_ioasic_hpt_read(struct clocksource *cs)
 {
 	return ioasic_read(IO_REG_FCTR);
 }
@@ -11,7 +11,7 @@

 #include <asm/time.h>

-static cycle_t c0_hpt_read(struct clocksource *cs)
+static u64 c0_hpt_read(struct clocksource *cs)
 {
 	return read_c0_count();
 }
@@ -30,7 +30,7 @@
  * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
  * again.
  */
-static inline cycle_t sb1250_hpt_get_cycles(void)
+static inline u64 sb1250_hpt_get_cycles(void)
 {
 	unsigned int count;
 	void __iomem *addr;
@@ -41,7 +41,7 @@ static inline cycle_t sb1250_hpt_get_cycles(void)
 	return SB1250_HPT_VALUE - count;
 }

-static cycle_t sb1250_hpt_read(struct clocksource *cs)
+static u64 sb1250_hpt_read(struct clocksource *cs)
 {
 	return sb1250_hpt_get_cycles();
 }
@@ -63,7 +63,7 @@ void __init ls1x_pwmtimer_init(void)
 	ls1x_pwmtimer_restart();
 }

-static cycle_t ls1x_clocksource_read(struct clocksource *cs)
+static u64 ls1x_clocksource_read(struct clocksource *cs)
 {
 	unsigned long flags;
 	int count;
@@ -107,7 +107,7 @@ static cycle_t ls1x_clocksource_read(struct clocksource *cs)

 	raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);

-	return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
+	return (u64) (jifs * ls1x_jiffies_per_tick) + count;
 }

 static struct clocksource ls1x_clocksource = {
@@ -144,7 +144,7 @@ void __init setup_mfgpt0_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t mfgpt_read(struct clocksource *cs)
+static u64 mfgpt_read(struct clocksource *cs)
 {
 	unsigned long flags;
 	int count;
@@ -188,7 +188,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)

 	raw_spin_unlock_irqrestore(&mfgpt_lock, flags);

-	return (cycle_t) (jifs * COMPARE) + count;
+	return (u64) (jifs * COMPARE) + count;
 }

 static struct clocksource clocksource_mfgpt = {
@@ -248,9 +248,9 @@ void __init setup_hpet_timer(void)
 	pr_info("hpet clock event device register\n");
 }

-static cycle_t hpet_read_counter(struct clocksource *cs)
+static u64 hpet_read_counter(struct clocksource *cs)
 {
-	return (cycle_t)hpet_read(HPET_COUNTER);
+	return (u64)hpet_read(HPET_COUNTER);
 }

 static void hpet_suspend(struct clocksource *cs)
@@ -75,7 +75,7 @@ static void __init estimate_frequencies(void)
 	unsigned int count, start;
 	unsigned char secs1, secs2, ctrl;
 	int secs;
-	cycle_t giccount = 0, gicstart = 0;
+	u64 giccount = 0, gicstart = 0;

 #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
 	mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
@@ -59,14 +59,14 @@ unsigned int get_c0_compare_int(void)
 	return IRQ_TIMER;
 }

-static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+static u64 nlm_get_pic_timer(struct clocksource *cs)
 {
 	uint64_t picbase = nlm_get_node(0)->picbase;

 	return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
 }

-static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+static u64 nlm_get_pic_timer32(struct clocksource *cs)
 {
 	uint64_t picbase = nlm_get_node(0)->picbase;

@@ -140,7 +140,7 @@ static void __init hub_rt_clock_event_global_init(void)
 	setup_irq(irq, &hub_rt_irqaction);
 }

-static cycle_t hub_rt_read(struct clocksource *cs)
+static u64 hub_rt_read(struct clocksource *cs)
 {
 	return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
 }
@@ -13,7 +13,7 @@
 #include <asm/timex.h>
 #include "internal.h"

-static cycle_t mn10300_read(struct clocksource *cs)
+static u64 mn10300_read(struct clocksource *cs)
 {
 	return read_timestamp_counter();
 }
@@ -81,7 +81,7 @@ static inline unsigned long read_timersnapshot(struct nios2_timer *timer)
 	return count;
 }

-static cycle_t nios2_timer_read(struct clocksource *cs)
+static u64 nios2_timer_read(struct clocksource *cs)
 {
 	struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
 	unsigned long flags;

@@ -117,9 +117,9 @@ static __init void openrisc_clockevent_init(void)
  * is 32 bits wide and runs at the CPU clock frequency.
  */

-static cycle_t openrisc_timer_read(struct clocksource *cs)
+static u64 openrisc_timer_read(struct clocksource *cs)
 {
-	return (cycle_t) mfspr(SPR_TTCR);
+	return (u64) mfspr(SPR_TTCR);
 }

 static struct clocksource openrisc_timer = {
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(profile_pc);

 /* clock source code */

-static cycle_t notrace read_cr16(struct clocksource *cs)
+static u64 notrace read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
@@ -80,7 +80,7 @@
 #include <linux/clockchips.h>
 #include <linux/timekeeper_internal.h>

-static cycle_t rtc_read(struct clocksource *);
+static u64 rtc_read(struct clocksource *);
 static struct clocksource clocksource_rtc = {
 	.name = "rtc",
 	.rating = 400,
@@ -89,7 +89,7 @@ static struct clocksource clocksource_rtc = {
 	.read = rtc_read,
 };

-static cycle_t timebase_read(struct clocksource *);
+static u64 timebase_read(struct clocksource *);
 static struct clocksource clocksource_timebase = {
 	.name = "timebase",
 	.rating = 400,
@@ -802,18 +802,18 @@ void read_persistent_clock(struct timespec *ts)
 }

 /* clocksource code */
-static cycle_t rtc_read(struct clocksource *cs)
+static u64 rtc_read(struct clocksource *cs)
 {
-	return (cycle_t)get_rtc();
+	return (u64)get_rtc();
 }

-static cycle_t timebase_read(struct clocksource *cs)
+static u64 timebase_read(struct clocksource *cs)
 {
-	return (cycle_t)get_tb();
+	return (u64)get_tb();
 }

 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult, cycle_t cycle_last)
+			 struct clocksource *clock, u32 mult, u64 cycle_last)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
 	u32 frac_sec;

@@ -1872,8 +1872,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	}
 	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
 		/ tb_ticks_per_sec;
-	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
-		      HRTIMER_MODE_REL);
+	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
 	vcpu->arch.timer_running = 1;
 }

@@ -180,7 +180,7 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 	smp_wmb(); /* insure spu event buffer updates are written */
 	/* don't want events intermingled... */

-	kt = ktime_set(0, profiling_interval);
+	kt = profiling_interval;
 	if (!spu_prof_running)
 		goto stop;
 	hrtimer_forward(timer, timer->base->get_time(), kt);
@@ -204,7 +204,7 @@ int start_spu_profiling_cycles(unsigned int cycles_reset)
 	ktime_t kt;

 	pr_debug("timer resolution: %lu\n", TICK_NSEC);
-	kt = ktime_set(0, profiling_interval);
+	kt = profiling_interval;
 	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer_set_expires(&timer, kt);
 	timer.function = profile_spus;

@@ -209,7 +209,7 @@ void read_boot_clock64(struct timespec64 *ts)
 	tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
 }

-static cycle_t read_tod_clock(struct clocksource *cs)
+static u64 read_tod_clock(struct clocksource *cs)
 {
 	unsigned long long now, adj;

@@ -1019,7 +1019,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return 0;

 	__set_cpu_idle(vcpu);
-	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

@@ -148,7 +148,7 @@ static unsigned int sbus_cycles_offset(void)
 	return offset;
 }

-static cycle_t timer_cs_read(struct clocksource *cs)
+static u64 timer_cs_read(struct clocksource *cs)
 {
 	unsigned int seq, offset;
 	u64 cycles;

@@ -770,7 +770,7 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);

-static cycle_t clocksource_tick_read(struct clocksource *cs)
+static u64 clocksource_tick_read(struct clocksource *cs)
 {
 	return tick_ops->get_tick();
 }
@@ -83,7 +83,7 @@ static irqreturn_t um_timer(int irq, void *dev)
 	return IRQ_HANDLED;
 }

-static cycle_t timer_read(struct clocksource *cs)
+static u64 timer_read(struct clocksource *cs)
 {
 	return os_nsecs() / TIMER_MULTIPLIER;
 }
@@ -62,7 +62,7 @@ static struct clock_event_device ckevt_puv3_osmr0 = {
 	.set_state_oneshot = puv3_osmr0_shutdown,
 };

-static cycle_t puv3_read_oscr(struct clocksource *cs)
+static u64 puv3_read_oscr(struct clocksource *cs)
 {
 	return readl(OST_OSCR);
 }
@@ -92,10 +92,10 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }

-static notrace cycle_t vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(int *mode)
 {
 	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
-	cycle_t ret;
+	u64 ret;
 	u64 last;
 	u32 version;

@@ -142,9 +142,9 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif

-notrace static cycle_t vread_tsc(void)
+notrace static u64 vread_tsc(void)
 {
-	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 ret = (u64)rdtsc_ordered();
 	u64 last = gtod->cycle_last;

 	if (likely(ret >= last))

@@ -768,7 +768,7 @@ struct kvm_arch {
 	spinlock_t pvclock_gtod_sync_lock;
 	bool use_master_clock;
 	u64 master_kernel_ns;
-	cycle_t master_cycle_now;
+	u64 master_cycle_now;
 	struct delayed_work kvmclock_update_work;
 	struct delayed_work kvmclock_sync_work;

@@ -14,7 +14,7 @@ static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
 #endif

 /* some helper functions for xen and kvm pv clock sources */
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
@@ -87,11 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 }

 static __always_inline
-cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
-			      u64 tsc)
+u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
 {
 	u64 delta = tsc - src->tsc_timestamp;
-	cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+	u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
 					 src->tsc_shift);
 	return src->system_time + offset;
 }
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
 	return rdtsc();
 }

-extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
+extern struct system_counterval_t convert_art_to_tsc(u64 art);

 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);

@@ -17,8 +17,8 @@ struct vsyscall_gtod_data {
 	unsigned seq;

 	int vclock_mode;
-	cycle_t cycle_last;
-	cycle_t mask;
+	u64 cycle_last;
+	u64 mask;
 	u32 mult;
 	u32 shift;

@@ -247,7 +247,7 @@ void apbt_setup_secondary_clock(void) {}
 static int apbt_clocksource_register(void)
 {
 	u64 start, now;
-	cycle_t t1;
+	u64 t1;

 	/* Start the counter, use timer 2 as source, timer 0/1 for event */
 	dw_apb_clocksource_start(clocksource_apbt);
@@ -355,7 +355,7 @@ unsigned long apbt_quick_calibrate(void)
 {
 	int i, scale;
 	u64 old, new;
-	cycle_t t1, t2;
+	u64 t1, t2;
 	unsigned long khz = 0;
 	u32 loop, shift;

@@ -133,9 +133,9 @@ static uint32_t __init ms_hyperv_platform(void)
 	return 0;
 }

-static cycle_t read_hv_clock(struct clocksource *arg)
+static u64 read_hv_clock(struct clocksource *arg)
 {
-	cycle_t current_tick;
+	u64 current_tick;
 	/*
 	 * Read the partition counter to get the current tick count. This count
 	 * is set to 0 when the partition is created and is incremented in

@@ -791,7 +791,7 @@ static union hpet_lock hpet __cacheline_aligned = {
 	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 };

-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
 	unsigned long flags;
 	union hpet_lock old, new;
@@ -802,7 +802,7 @@ static cycle_t read_hpet(struct clocksource *cs)
 	 * Read HPET directly if in NMI.
 	 */
 	if (in_nmi())
-		return (cycle_t)hpet_readl(HPET_COUNTER);
+		return (u64)hpet_readl(HPET_COUNTER);

 	/*
 	 * Read the current state of the lock and HPET value atomically.
@@ -821,7 +821,7 @@ static cycle_t read_hpet(struct clocksource *cs)
 		WRITE_ONCE(hpet.value, new.value);
 		arch_spin_unlock(&hpet.lock);
 		local_irq_restore(flags);
-		return (cycle_t)new.value;
+		return (u64)new.value;
 	}
 	local_irq_restore(flags);

@@ -843,15 +843,15 @@ static cycle_t read_hpet(struct clocksource *cs)
 		new.lockval = READ_ONCE(hpet.lockval);
 	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));

-	return (cycle_t)new.value;
+	return (u64)new.value;
 }
 #else
 /*
  * For UP or 32-bit.
  */
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
-	return (cycle_t)hpet_readl(HPET_COUNTER);
+	return (u64)hpet_readl(HPET_COUNTER);
 }
 #endif

@@ -867,7 +867,7 @@ static struct clocksource clocksource_hpet = {
 static int hpet_clocksource_register(void)
 {
 	u64 start, now;
-	cycle_t t1;
+	u64 t1;

 	/* Start the counter */
 	hpet_restart_counter();

@@ -32,7 +32,7 @@
 static int kvmclock __ro_after_init = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static cycle_t kvm_sched_clock_offset;
+static u64 kvm_sched_clock_offset;

 static int parse_no_kvmclock(char *arg)
 {
@@ -79,10 +79,10 @@ static int kvm_set_wallclock(const struct timespec *now)
 	return -1;
 }

-static cycle_t kvm_clock_read(void)
+static u64 kvm_clock_read(void)
 {
 	struct pvclock_vcpu_time_info *src;
-	cycle_t ret;
+	u64 ret;
 	int cpu;

 	preempt_disable_notrace();
@@ -93,12 +93,12 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }

-static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+static u64 kvm_clock_get_cycles(struct clocksource *cs)
 {
 	return kvm_clock_read();
 }

-static cycle_t kvm_sched_clock_read(void)
+static u64 kvm_sched_clock_read(void)
 {
 	return kvm_clock_read() - kvm_sched_clock_offset;
 }

@@ -71,10 +71,10 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 	return flags & valid_flags;
 }

-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
+	u64 ret;
 	u64 last;
 	u8 flags;

@@ -1101,9 +1101,9 @@ static void tsc_resume(struct clocksource *cs)
  * checking the result of read_tsc() - cycle_last for being negative.
  * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
-static cycle_t read_tsc(struct clocksource *cs)
+static u64 read_tsc(struct clocksource *cs)
 {
-	return (cycle_t)rdtsc_ordered();
+	return (u64)rdtsc_ordered();
 }

 /*
@@ -1192,7 +1192,7 @@ int unsynchronized_tsc(void)
 /*
  * Convert ART to TSC given numerator/denominator found in detect_art()
 */
-struct system_counterval_t convert_art_to_tsc(cycle_t art)
+struct system_counterval_t convert_art_to_tsc(u64 art)
 {
 	u64 tmp, res, rem;

@@ -1106,7 +1106,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	now = ktime_get();
 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
 	if (ktime_to_ns(remaining) < 0)
-		remaining = ktime_set(0, 0);
+		remaining = 0;

 	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
 	tmcct = div64_u64(ns,
@@ -2057,7 +2057,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 		apic->lapic_timer.tscdeadline = 0;
 	if (apic_lvtt_oneshot(apic)) {
 		apic->lapic_timer.tscdeadline = 0;
-		apic->lapic_timer.target_expiration = ktime_set(0, 0);
+		apic->lapic_timer.target_expiration = 0;
 	}
 	atomic_set(&apic->lapic_timer.pending, 0);
 }

@@ -1131,8 +1131,8 @@ struct pvclock_gtod_data {

 	struct { /* extract of a clocksource struct */
 		int vclock_mode;
-		cycle_t cycle_last;
-		cycle_t mask;
+		u64 cycle_last;
+		u64 mask;
 		u32 mult;
 		u32 shift;
 	} clock;
@@ -1572,9 +1572,9 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)

 #ifdef CONFIG_X86_64

-static cycle_t read_tsc(void)
+static u64 read_tsc(void)
 {
-	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 ret = (u64)rdtsc_ordered();
 	u64 last = pvclock_gtod_data.clock.cycle_last;

 	if (likely(ret >= last))
@@ -1592,7 +1592,7 @@ static cycle_t read_tsc(void)
 	return last;
 }

-static inline u64 vgettsc(cycle_t *cycle_now)
+static inline u64 vgettsc(u64 *cycle_now)
 {
 	long v;
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
@@ -1603,7 +1603,7 @@ static inline u64 vgettsc(cycle_t *cycle_now)
 	return v * gtod->clock.mult;
 }

-static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
+static int do_monotonic_boot(s64 *t, u64 *cycle_now)
 {
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 	unsigned long seq;
@@ -1624,7 +1624,7 @@ static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
 }

 /* returns true if host is using tsc clocksource */
-static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
 {
 	/* checked again under seqlock below */
 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)

@@ -916,7 +916,7 @@ static unsigned long lguest_tsc_khz(void)
  * If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host.
  */
-static cycle_t lguest_clock_read(struct clocksource *cs)
+static u64 lguest_clock_read(struct clocksource *cs)
 {
 	unsigned long sec, nsec;

@@ -30,7 +30,7 @@

 #define RTC_NAME "sgi_rtc"

-static cycle_t uv_read_rtc(struct clocksource *cs);
+static u64 uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static int uv_rtc_shutdown(struct clock_event_device *evt);

@@ -38,7 +38,7 @@ static struct clocksource clocksource_uv = {
 	.name = RTC_NAME,
 	.rating = 299,
 	.read = uv_read_rtc,
-	.mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+	.mask = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };

@@ -296,7 +296,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
 * cachelines of it's own page. This allows faster simultaneous reads
 * from a given socket.
 */
-static cycle_t uv_read_rtc(struct clocksource *cs)
+static u64 uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;

@@ -305,7 +305,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs)
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

-	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	return (u64)uv_read_local_mmr(UVH_RTC | offset);
 }

 /*

@@ -39,10 +39,10 @@ static unsigned long xen_tsc_khz(void)
 	return pvclock_tsc_khz(info);
 }

-cycle_t xen_clocksource_read(void)
+u64 xen_clocksource_read(void)
 {
 	struct pvclock_vcpu_time_info *src;
-	cycle_t ret;
+	u64 ret;

 	preempt_disable_notrace();
 	src = &__this_cpu_read(xen_vcpu)->time;
@@ -51,7 +51,7 @@ cycle_t xen_clocksource_read(void)
 	return ret;
 }

-static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+static u64 xen_clocksource_get_cycles(struct clocksource *cs)
 {
 	return xen_clocksource_read();
 }
@@ -67,7 +67,7 @@ void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
-cycle_t xen_clocksource_read(void);
+u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void __init xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);

@@ -34,9 +34,9 @@
 unsigned long ccount_freq; /* ccount Hz */
 EXPORT_SYMBOL(ccount_freq);

-static cycle_t ccount_read(struct clocksource *cs)
+static u64 ccount_read(struct clocksource *cs)
 {
-	return (cycle_t)get_ccount();
+	return (u64)get_ccount();
 }

 static u64 notrace ccount_sched_clock_read(void)

@@ -2569,7 +2569,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	 * This will be replaced with the stats tracking code, using
 	 * 'avg_completion_time / 2' as the pre-sleep target.
 	 */
-	kt = ktime_set(0, nsecs);
+	kt = nsecs;

 	mode = HRTIMER_MODE_REL;
 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);

@@ -194,7 +194,7 @@ void device_pm_move_last(struct device *dev)

 static ktime_t initcall_debug_start(struct device *dev)
 {
-	ktime_t calltime = ktime_set(0, 0);
+	ktime_t calltime = 0;

 	if (pm_print_times_enabled) {
 		pr_info("calling %s+ @ %i, parent: %s\n",

@@ -998,14 +998,14 @@ static int print_wakeup_source_stats(struct seq_file *m,

 		active_time = ktime_sub(now, ws->last_time);
 		total_time = ktime_add(total_time, active_time);
-		if (active_time.tv64 > max_time.tv64)
+		if (active_time > max_time)
 			max_time = active_time;

 		if (ws->autosleep_enabled)
 			prevent_sleep_time = ktime_add(prevent_sleep_time,
 				ktime_sub(now, ws->start_prevent_time));
 	} else {
-		active_time = ktime_set(0, 0);
+		active_time = 0;
 	}

 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",

@@ -257,7 +257,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)

 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-	ktime_t kt = ktime_set(0, completion_nsec);
+	ktime_t kt = completion_nsec;

 	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }

@@ -69,9 +69,9 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
 #ifdef CONFIG_IA64
 static void __iomem *hpet_mctr;

-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
-	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
+	return (u64)read_counter((void __iomem *)hpet_mctr);
 }

 static struct clocksource clocksource_hpet = {

@@ -58,16 +58,16 @@ u32 acpi_pm_read_verified(void)
 	return v2;
 }

-static cycle_t acpi_pm_read(struct clocksource *cs)
+static u64 acpi_pm_read(struct clocksource *cs)
 {
-	return (cycle_t)read_pmtmr();
+	return (u64)read_pmtmr();
 }

 static struct clocksource clocksource_acpi_pm = {
 	.name = "acpi_pm",
 	.rating = 200,
 	.read = acpi_pm_read,
-	.mask = (cycle_t)ACPI_PM_MASK,
+	.mask = (u64)ACPI_PM_MASK,
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };

@@ -81,9 +81,9 @@ static int __init acpi_pm_good_setup(char *__str)
 }
 __setup("acpi_pm_good", acpi_pm_good_setup);

-static cycle_t acpi_pm_read_slow(struct clocksource *cs)
+static u64 acpi_pm_read_slow(struct clocksource *cs)
 {
-	return (cycle_t)acpi_pm_read_verified();
+	return (u64)acpi_pm_read_verified();
 }

 static inline void acpi_pm_need_workaround(void)
@@ -145,7 +145,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
 */
 static int verify_pmtmr_rate(void)
 {
-	cycle_t value1, value2;
+	u64 value1, value2;
 	unsigned long count, delta;

 	mach_prepare_counter();
@@ -175,7 +175,7 @@ static int verify_pmtmr_rate(void)

 static int __init init_acpi_pm_clocksource(void)
 {
-	cycle_t value1, value2;
+	u64 value1, value2;
 	unsigned int i, j = 0;

 	if (!pmtmr_ioport)

@@ -56,7 +56,7 @@ static int noinline arc_get_timer_clk(struct device_node *node)

 #ifdef CONFIG_ARC_TIMERS_64BIT

-static cycle_t arc_read_gfrc(struct clocksource *cs)
+static u64 arc_read_gfrc(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 l, h;
@@ -71,7 +71,7 @@ static cycle_t arc_read_gfrc(struct clocksource *cs)

 	local_irq_restore(flags);

-	return (((cycle_t)h) << 32) | l;
+	return (((u64)h) << 32) | l;
 }

 static struct clocksource arc_counter_gfrc = {
@@ -105,7 +105,7 @@ CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
 #define AUX_RTC_LOW 0x104
 #define AUX_RTC_HIGH 0x105

-static cycle_t arc_read_rtc(struct clocksource *cs)
+static u64 arc_read_rtc(struct clocksource *cs)
 {
 	unsigned long status;
 	u32 l, h;
@@ -122,7 +122,7 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
 		status = read_aux_reg(AUX_RTC_CTRL);
 	} while (!(status & _BITUL(31)));

-	return (((cycle_t)h) << 32) | l;
+	return (((u64)h) << 32) | l;
 }

 static struct clocksource arc_counter_rtc = {
@@ -166,9 +166,9 @@ CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
 * 32bit TIMER1 to keep counting monotonically and wraparound
 */

-static cycle_t arc_read_timer1(struct clocksource *cs)
+static u64 arc_read_timer1(struct clocksource *cs)
 {
-	return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
 }

 static struct clocksource arc_counter_timer1 = {

@@ -562,12 +562,12 @@ static u64 arch_counter_get_cntvct_mem(void)
 */
 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

-static cycle_t arch_counter_read(struct clocksource *cs)
+static u64 arch_counter_read(struct clocksource *cs)
 {
 	return arch_timer_read_counter();
 }

-static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+static u64 arch_counter_read_cc(const struct cyclecounter *cc)
 {
 	return arch_timer_read_counter();
 }

@@ -195,7 +195,7 @@ static int gt_dying_cpu(unsigned int cpu)
 	return 0;
 }

-static cycle_t gt_clocksource_read(struct clocksource *cs)
+static u64 gt_clocksource_read(struct clocksource *cs)
 {
 	return gt_counter_read();
 }

@@ -158,11 +158,11 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
 *
 * returns: Current timer counter register value
 **/
-static cycle_t __ttc_clocksource_read(struct clocksource *cs)
+static u64 __ttc_clocksource_read(struct clocksource *cs)
 {
 	struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;

-	return (cycle_t)readl_relaxed(timer->base_addr +
+	return (u64)readl_relaxed(timer->base_addr +
 				TTC_COUNT_VAL_OFFSET);
 }

@@ -30,7 +30,7 @@

 static void __iomem *clksrc_dbx500_timer_base;

-static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
+static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
 {
 	void __iomem *base = clksrc_dbx500_timer_base;
 	u32 count, count2;

@@ -348,7 +348,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
 	dw_apb_clocksource_read(dw_cs);
 }

-static cycle_t __apbt_read_clocksource(struct clocksource *cs)
+static u64 __apbt_read_clocksource(struct clocksource *cs)
 {
 	u32 current_count;
 	struct dw_apb_clocksource *dw_cs =
@@ -357,7 +357,7 @@ static cycle_t __apbt_read_clocksource(struct clocksource *cs)
 	current_count = apbt_readl_relaxed(&dw_cs->timer,
 				APBTMR_N_CURRENT_VALUE);

-	return (cycle_t)~current_count;
+	return (u64)~current_count;
 }

 static void apbt_restart_clocksource(struct clocksource *cs)
@@ -416,7 +416,7 @@ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
 *
 * @dw_cs: The clocksource to read.
 */
-cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
+u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
 {
-	return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
+	return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
 }

@@ -110,9 +110,9 @@ static void em_sti_disable(struct em_sti_priv *p)
 	clk_disable_unprepare(p->clk);
 }

-static cycle_t em_sti_count(struct em_sti_priv *p)
+static u64 em_sti_count(struct em_sti_priv *p)
 {
-	cycle_t ticks;
+	u64 ticks;
 	unsigned long flags;

 	/* the STI hardware buffers the 48-bit count, but to
@@ -121,14 +121,14 @@ static cycle_t em_sti_count(struct em_sti_priv *p)
 	 * Always read STI_COUNT_H before STI_COUNT_L.
 	 */
 	raw_spin_lock_irqsave(&p->lock, flags);
-	ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+	ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
 	ticks |= em_sti_read(p, STI_COUNT_L);
 	raw_spin_unlock_irqrestore(&p->lock, flags);

 	return ticks;
 }

-static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+static u64 em_sti_set_next(struct em_sti_priv *p, u64 next)
 {
 	unsigned long flags;

@@ -198,7 +198,7 @@ static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
 	return container_of(cs, struct em_sti_priv, cs);
 }

-static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+static u64 em_sti_clocksource_read(struct clocksource *cs)
 {
 	return em_sti_count(cs_to_em_sti(cs));
 }
@@ -271,7 +271,7 @@ static int em_sti_clock_event_next(unsigned long delta,
 				struct clock_event_device *ced)
 {
 	struct em_sti_priv *p = ced_to_em_sti(ced);
-	cycle_t next;
+	u64 next;
 	int safe;

 	next = em_sti_set_next(p, em_sti_count(p) + delta);

@@ -183,7 +183,7 @@ static u64 exynos4_read_count_64(void)
 		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
 	} while (hi != hi2);

-	return ((cycle_t)hi << 32) | lo;
+	return ((u64)hi << 32) | lo;
 }

 /**
@@ -199,7 +199,7 @@ static u32 notrace exynos4_read_count_32(void)
 	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
 }

-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static u64 exynos4_frc_read(struct clocksource *cs)
 {
 	return exynos4_read_count_32();
 }
@@ -266,7 +266,7 @@ static void exynos4_mct_comp0_stop(void)
 static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
 {
 	unsigned int tcon;
-	cycle_t comp_cycle;
+	u64 comp_cycle;

 	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

@@ -72,7 +72,7 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs)
 	return container_of(cs, struct timer16_priv, cs);
 }

-static cycle_t timer16_clocksource_read(struct clocksource *cs)
+static u64 timer16_clocksource_read(struct clocksource *cs)
 {
 	struct timer16_priv *p = cs_to_priv(cs);
 	unsigned long raw, value;

@@ -64,7 +64,7 @@ static inline struct tpu_priv *cs_to_priv(struct clocksource *cs)
 	return container_of(cs, struct tpu_priv, cs);
 }

-static cycle_t tpu_clocksource_read(struct clocksource *cs)
+static u64 tpu_clocksource_read(struct clocksource *cs)
 {
 	struct tpu_priv *p = cs_to_priv(cs);
 	unsigned long flags;

@@ -25,7 +25,7 @@ EXPORT_SYMBOL(i8253_lock);
 * to just read by itself. So use jiffies to emulate a free
 * running counter:
 */
-static cycle_t i8253_read(struct clocksource *cs)
+static u64 i8253_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
@@ -83,7 +83,7 @@ static cycle_t i8253_read(struct clocksource *cs)

 	count = (PIT_LATCH - 1) - count;

-	return (cycle_t)(jifs * PIT_LATCH) + count;
+	return (u64)(jifs * PIT_LATCH) + count;
 }

 static struct clocksource i8253_cs = {

@@ -57,7 +57,7 @@ static notrace u64 jcore_sched_clock_read(void)
 	return seclo * NSEC_PER_SEC + nsec;
 }

-static cycle_t jcore_clocksource_read(struct clocksource *cs)
+static u64 jcore_clocksource_read(struct clocksource *cs)
 {
 	return jcore_sched_clock_read();
 }

@@ -56,7 +56,7 @@ static int metag_timer_set_next_event(unsigned long delta,
 	return 0;
 }

-static cycle_t metag_clocksource_read(struct clocksource *cs)
+static u64 metag_clocksource_read(struct clocksource *cs)
 {
 	return __core_reg_get(TXTIMER);
 }

@@ -125,7 +125,7 @@ static int gic_clockevent_init(void)
 	return 0;
 }

-static cycle_t gic_hpt_read(struct clocksource *cs)
+static u64 gic_hpt_read(struct clocksource *cs)
 {
 	return gic_read_count();
 }

@@ -20,24 +20,24 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
 	return container_of(c, struct clocksource_mmio, clksrc);
 }

-cycle_t clocksource_mmio_readl_up(struct clocksource *c)
+u64 clocksource_mmio_readl_up(struct clocksource *c)
 {
-	return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg);
+	return (u64)readl_relaxed(to_mmio_clksrc(c)->reg);
 }

-cycle_t clocksource_mmio_readl_down(struct clocksource *c)
+u64 clocksource_mmio_readl_down(struct clocksource *c)
 {
-	return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+	return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }

-cycle_t clocksource_mmio_readw_up(struct clocksource *c)
+u64 clocksource_mmio_readw_up(struct clocksource *c)
 {
-	return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg);
+	return (u64)readw_relaxed(to_mmio_clksrc(c)->reg);
 }

-cycle_t clocksource_mmio_readw_down(struct clocksource *c)
+u64 clocksource_mmio_readw_down(struct clocksource *c)
 {
-	return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+	return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }

 /**
@@ -51,7 +51,7 @@ cycle_t clocksource_mmio_readw_down(struct clocksource *c)
 */
 int __init clocksource_mmio_init(void __iomem *base, const char *name,
 	unsigned long hz, int rating, unsigned bits,
-	cycle_t (*read)(struct clocksource *))
+	u64 (*read)(struct clocksource *))
 {
 	struct clocksource_mmio *cs;

@@ -97,7 +97,7 @@ static void timrot_irq_acknowledge(void)
 		HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
 }

-static cycle_t timrotv1_get_cycles(struct clocksource *cs)
+static u64 timrotv1_get_cycles(struct clocksource *cs)
 {
 	return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
 		& 0xffff0000) >> 16);

@@ -89,7 +89,7 @@ static struct clock_event_device __percpu *msm_evt;

 static void __iomem *source_base;

-static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+static notrace u64 msm_read_timer_count(struct clocksource *cs)
 {
 	return readl_relaxed(source_base + TIMER_COUNT_VAL);
 }

@@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs)
 	samsung_time_start(pwm.source_id, true);
 }

-static cycle_t notrace samsung_clocksource_read(struct clocksource *c)
+static u64 notrace samsung_clocksource_read(struct clocksource *c)
 {
 	return ~readl_relaxed(pwm.source_reg);
 }

@@ -43,10 +43,10 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
 /* The base timer frequency, * 27 if selected */
 #define HRT_FREQ 1000000

-static cycle_t read_hrt(struct clocksource *cs)
+static u64 read_hrt(struct clocksource *cs)
 {
 	/* Read the timer value */
-	return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
+	return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
 }

 static struct clocksource cs_hrt = {

@@ -612,7 +612,7 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 	return container_of(cs, struct sh_cmt_channel, cs);
 }

-static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+static u64 sh_cmt_clocksource_read(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 	unsigned long flags, raw;

@@ -255,7 +255,7 @@ static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
 	return container_of(cs, struct sh_tmu_channel, cs);
 }

-static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
+static u64 sh_tmu_clocksource_read(struct clocksource *cs)
 {
 	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

@@ -41,7 +41,7 @@

 static void __iomem *tcaddr;

-static cycle_t tc_get_cycles(struct clocksource *cs)
+static u64 tc_get_cycles(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 lower, upper;
@@ -56,7 +56,7 @@ static cycle_t tc_get_cycles(struct clocksource *cs)
 	return (upper << 16) | lower;
 }

-static cycle_t tc_get_cycles32(struct clocksource *cs)
+static u64 tc_get_cycles32(struct clocksource *cs)
 {
 	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }

@@ -67,7 +67,7 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
 	writel(value, base + 0x20 * gpt_id + offset);
 }

-static cycle_t notrace
+static u64 notrace
 pistachio_clocksource_read_cycles(struct clocksource *cs)
 {
 	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
@@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
 	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
 	raw_spin_unlock_irqrestore(&pcs->lock, flags);

-	return (cycle_t)~counter;
+	return (u64)~counter;
 }

 static u64 notrace pistachio_read_sched_clock(void)

@@ -85,7 +85,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
 }

 /* read 64-bit timer counter */
-static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+static u64 sirfsoc_timer_read(struct clocksource *cs)
 {
 	u64 cycles;

@@ -73,7 +73,7 @@ static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsign
 * Clocksource: just a monotonic counter of MCK/16 cycles.
 * We don't care whether or not PIT irqs are enabled.
 */
-static cycle_t read_pit_clk(struct clocksource *cs)
+static u64 read_pit_clk(struct clocksource *cs)
 {
 	struct pit_data *data = clksrc_to_pit_data(cs);
 	unsigned long flags;

@@ -92,7 +92,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
 	return IRQ_NONE;
 }

-static cycle_t read_clk32k(struct clocksource *cs)
+static u64 read_clk32k(struct clocksource *cs)
 {
 	return read_CRTR();
 }

@@ -77,11 +77,11 @@ static int __init nps_get_timer_clk(struct device_node *node,
 	return 0;
 }

-static cycle_t nps_clksrc_read(struct clocksource *clksrc)
+static u64 nps_clksrc_read(struct clocksource *clksrc)
 {
 	int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;

-	return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
+	return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
 }

 static int __init nps_setup_clocksource(struct device_node *node)
(Some files were not shown because too many files have changed in this diff.)