Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-28 11:18:45 +07:00
0db49b72bc
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  sched/tracing: Add a new tracepoint for sleeptime
  sched: Disable scheduler warnings during oopses
  sched: Fix cgroup movement of waking process
  sched: Fix cgroup movement of newly created process
  sched: Fix cgroup movement of forking process
  sched: Remove cfs bandwidth period check in tg_set_cfs_period()
  sched: Fix load-balance lock-breaking
  sched: Replace all_pinned with a generic flags field
  sched: Only queue remote wakeups when crossing cache boundaries
  sched: Add missing rcu_dereference() around ->real_parent usage
  [S390] fix cputime overflow in uptime_proc_show
  [S390] cputime: add sparse checking and cleanup
  sched: Mark parent and real_parent as __rcu
  sched, nohz: Fix missing RCU read lock
  sched, nohz: Set the NOHZ_BALANCE_KICK flag for idle load balancer
  sched, nohz: Fix the idle cpu check in nohz_idle_balance
  sched: Use jump_labels for sched_feat
  sched/accounting: Fix parameter passing in task_group_account_field
  sched/accounting: Fix user/system tick double accounting
  sched/accounting: Re-use scheduler statistics for the root cgroup
  ...

Fix up conflicts in
  - arch/ia64/include/asm/cputime.h, include/asm-generic/cputime.h
    usecs_to_cputime64() vs the sparse cleanups
  - kernel/sched/fair.c, kernel/time/tick-sched.c
    scheduler changes in multiple branches
199 lines · 4.8 KiB · C
/*
 *  include/asm-s390/cputime.h
 *
 *  (C) Copyright IBM Corp. 2004
 *
 *  Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef _S390_CPUTIME_H
#define _S390_CPUTIME_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/div64.h>

/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
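/*
 * One cputime_t unit is thus 1/4096 of a microsecond, i.e. 4096 units per
 * microsecond and 4096000000 units per second; these are the constants
 * used in all of the conversions below.
 */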
typedef unsigned long long __nocast cputime_t;
typedef unsigned long long __nocast cputime64_t;
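/*
 * Divide a 64-bit value by a 32-bit base. The 31-bit variant uses the
 * "dr" instruction, which divides the 64-bit contents of an even/odd
 * register pair and leaves the quotient in the odd register; dividend and
 * divisor are pre-shifted right by one so the operands stay in range,
 * which does not change the quotient for the even divisors used here.
 */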
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
#ifndef __s390x__
        register_pair rp;

        rp.pair = n >> 1;
        asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
        return rp.subreg.odd;
#else /* __s390x__ */
        return n / base;
#endif /* __s390x__ */
}

#define cputime_one_jiffy		jiffies_to_cputime(1)

/*
 * Convert cputime to jiffies and back.
 */
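/* 4096000000ULL / HZ is the number of cputime units per jiffy. */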
static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
{
        return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
}

static inline cputime_t jiffies_to_cputime(const unsigned int jif)
{
        return (__force cputime_t)(jif * (4096000000ULL / HZ));
}

static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
{
        unsigned long long jif = (__force unsigned long long) cputime;
        do_div(jif, 4096000000ULL / HZ);
        return jif;
}

static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
        return (__force cputime64_t)(jif * (4096000000ULL / HZ));
}

/*
 * Convert cputime to microseconds and back.
 */
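/* A shift by 12 divides by 4096, the number of cputime units per microsecond. */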
static inline unsigned int cputime_to_usecs(const cputime_t cputime)
{
        return (__force unsigned long long) cputime >> 12;
}

static inline cputime_t usecs_to_cputime(const unsigned int m)
{
        return (__force cputime_t)(m * 4096ULL);
}

#define usecs_to_cputime64(m)		usecs_to_cputime(m)
/*
 * Convert cputime to seconds and back.
 */
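/*
 * Dividing by 2048000000 and then shifting right by one is a division by
 * 4096000000, the number of cputime units per second.
 */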
static inline unsigned int cputime_to_secs(const cputime_t cputime)
{
        return __div((__force unsigned long long) cputime, 2048000000) >> 1;
}

static inline cputime_t secs_to_cputime(const unsigned int s)
{
        return (__force cputime_t)(s * 4096000000ULL);
}

/*
 * Convert cputime to timespec and back.
 */
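/*
 * A timespec carries nanoseconds; the sub-second part is converted with
 * the factor 4096 / 1000 (nanoseconds to cputime units) and 1000 / 4096
 * for the opposite direction.
 */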
static inline cputime_t timespec_to_cputime(const struct timespec *value)
{
        unsigned long long ret = value->tv_sec * 4096000000ULL;
        return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
}

static inline void cputime_to_timespec(const cputime_t cputime,
                                       struct timespec *value)
{
        unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
        register_pair rp;

        rp.pair = __cputime >> 1;
        asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
        value->tv_nsec = rp.subreg.even * 1000 / 4096;
        value->tv_sec = rp.subreg.odd;
#else
        value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
        value->tv_sec = __cputime / 4096000000ULL;
#endif
}
/*
 * Convert cputime to timeval and back.
 * A timeval carries microseconds, so only the constant factor of 4096
 * cputime units per microsecond is involved.
 */
static inline cputime_t timeval_to_cputime(const struct timeval *value)
{
        unsigned long long ret = value->tv_sec * 4096000000ULL;
        return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
}

static inline void cputime_to_timeval(const cputime_t cputime,
                                      struct timeval *value)
{
        unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
        register_pair rp;

        rp.pair = __cputime >> 1;
        asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
        value->tv_usec = rp.subreg.even / 4096;
        value->tv_sec = rp.subreg.odd;
#else
        value->tv_usec = (__cputime % 4096000000ULL) / 4096;
        value->tv_sec = __cputime / 4096000000ULL;
#endif
}

/*
 * Convert cputime to clock and back.
 */
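/*
 * clock_t values here are in USER_HZ ticks, the tick rate the kernel
 * exposes to user space (e.g. via times(2)); 4096000000ULL / USER_HZ is
 * the number of cputime units per such tick.
 */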
static inline clock_t cputime_to_clock_t(cputime_t cputime)
{
        unsigned long long clock = (__force unsigned long long) cputime;
        do_div(clock, 4096000000ULL / USER_HZ);
        return clock;
}

static inline cputime_t clock_t_to_cputime(unsigned long x)
{
        return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
}

/*
 * Convert cputime64 to clock.
 */
static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
{
        unsigned long long clock = (__force unsigned long long) cputime;
        do_div(clock, 4096000000ULL / USER_HZ);
        return clock;
}
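/*
 * Per-CPU idle time accounting data. The sequence counter is incremented
 * around updates so that lockless readers can detect a concurrent update
 * and retry (seqcount style).
 */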
struct s390_idle_data {
        unsigned int sequence;
        unsigned long long idle_count;
        unsigned long long idle_enter;
        unsigned long long idle_time;
        int nohz_delay;
};

DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
cputime64_t s390_get_idle_time(int cpu);

#define arch_idle_time(cpu) s390_get_idle_time(cpu)
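/*
 * Called on interrupt entry: a set wait bit in the interrupted PSW means
 * the CPU was in enabled wait (idle), so let vtime_start_cpu() account
 * the idle period that just ended.
 */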
static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
                                   __u64 enter_timer)
{
        if (regs->psw.mask & PSW_MASK_WAIT)
                vtime_start_cpu(int_clock, enter_timer);
}
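/*
 * A non-zero nohz_delay tells the nohz code, via arch_needs_cpu(), not to
 * stop the periodic tick on this CPU yet.
 */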
static inline int s390_nohz_delay(int cpu)
{
        return __get_cpu_var(s390_idle).nohz_delay != 0;
}

#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)

#endif /* _S390_CPUTIME_H */