Merge branch 'mvebu/fixes-3' into mvebu/soc

This commit is contained in:
Andrew Lunn 2015-01-19 16:00:15 -06:00
commit fe6e91e338
427 changed files with 4522 additions and 3567 deletions

View File

@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>

View File

@ -10,12 +10,13 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
- gpios: OF device-tree gpio specification.
- interrupts: the interrupt line for that input.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
Required mutually exclusive subnode-properties:
- gpios: OF device-tree gpio specification.
- interrupts: the interrupt line for that input
Note that either "interrupts" or "gpios" properties can be omitted, but not
both at the same time. Specifying both properties is allowed.
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
@ -23,6 +24,9 @@ Optional subnode-properties:
- debounce-interval: Debouncing interval time in milliseconds.
If not specified defaults to 5.
- gpio-key,wakeup: Boolean, button can wake-up the system.
- linux,can-disable: Boolean, indicates that the button is connected
to a dedicated (not shared) interrupt which can be disabled to
suppress events from the button.
Example nodes:

View File

@ -8,6 +8,8 @@ Optional properties:
- debounce-interval : Debouncing interval time in milliseconds
- st,scan-count : Scanning cycles elapsed before key data is updated
- st,no-autorepeat : If specified, the device will not autorepeat
- keypad,num-rows : See ./matrix-keymap.txt
- keypad,num-columns : See ./matrix-keymap.txt
Example:

View File

@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h
F: drivers/char/apm-emulation.c
APPLE BCM5974 MULTITOUCH DRIVER
M: Henrik Rydberg <rydberg@euromail.se>
M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
S: Maintained
S: Odd fixes
F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
M: Henrik Rydberg <rydberg@euromail.se>
M: Henrik Rydberg <rydberg@bitmath.org>
L: lm-sensors@lm-sensors.org
S: Maintained
S: Odd fixes
F: drivers/hwmon/applesmc.c
APPLETALK NETWORK LAYER
@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
M: Josef Bacik <jbacik@fb.com>
M: David Sterba <dsterba@suse.cz>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@ -4940,10 +4941,10 @@ F: include/uapi/linux/input.h
F: include/linux/input/
INPUT MULTITOUCH (MT) PROTOCOL
M: Henrik Rydberg <rydberg@euromail.se>
M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
S: Maintained
S: Odd fixes
F: Documentation/input/multi-touch-protocol.txt
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc4
NAME = Diseased Newt
# *DOCUMENTATION*
@ -391,6 +391,7 @@ USERINCLUDE := \
# Needed to be compatible with the O= option
LINUXINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include \
-Iarch/$(hdr-arch)/include/generated/uapi \
-Iarch/$(hdr-arch)/include/generated \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
-Iinclude \

View File

@ -203,27 +203,3 @@ spdif_in: spdif-in {
compatible = "linux,spdif-dir";
};
};
&pinctrl {
/*
* These pins might be muxed as I2S by
* the bootloader, but it conflicts
* with the real I2S pins that are
* muxed using i2s_pins. We must mux
* those pins to a function other than
* I2S.
*/
pinctrl-0 = <&hog_pins1 &hog_pins2>;
pinctrl-names = "default";
hog_pins1: hog-pins1 {
marvell,pins = "mpp6", "mpp8", "mpp10",
"mpp12", "mpp13";
marvell,function = "gpio";
};
hog_pins2: hog-pins2 {
marvell,pins = "mpp5", "mpp7", "mpp9";
marvell,function = "gpo";
};
};

View File

@ -338,6 +338,7 @@ CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
CONFIG_USB_EHCI_HCD_PLATFORM=y

View File

@ -413,6 +413,7 @@
#define __NR_getrandom (__NR_SYSCALL_BASE+384)
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
#define __NR_bpf (__NR_SYSCALL_BASE+386)
#define __NR_execveat (__NR_SYSCALL_BASE+387)
/*
* The following SWIs are ARM private.

View File

@ -396,6 +396,7 @@
CALL(sys_getrandom)
/* 385 */ CALL(sys_memfd_create)
CALL(sys_bpf)
CALL(sys_execveat)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted

View File

@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
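
For context, this stub pairs with a generic-side change from the same series that moves user-regs sampling into arch code. Below is a hedged sketch of roughly how the perf core consumes the new hook; it approximates kernel/events/core.c from this series and is not a verbatim copy.

	/*
	 * Sketch: sample user registers directly when the event fired in
	 * user mode, defer to the arch's perf_get_regs_user() when it
	 * fired in the kernel, and report no registers for kernel
	 * threads (no user mm).
	 */
	static void perf_sample_regs_user(struct perf_regs *regs_user,
					  struct pt_regs *regs,
					  struct pt_regs *regs_user_copy)
	{
		if (user_mode(regs)) {
			regs_user->abi = perf_reg_abi(current);
			regs_user->regs = regs;
		} else if (current->mm) {
			perf_get_regs_user(regs_user, regs, regs_user_copy);
		} else {
			regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
			regs_user->regs = NULL;
		}
	}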

View File

@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
#if defined(CONFIG_SMP)
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
(per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000/HZ),
(loops_per_jiffy / (5000/HZ)) % 100);
#endif
/* dump out the processor features */
seq_puts(m, "Features\t: ");

View File

@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
int cpu;
unsigned long bogosum = 0;
for_each_online_cpu(cpu)
bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
num_online_cpus(),
bogosum / (500000/HZ),
(bogosum / (5000/HZ)) % 100);
hyp_mode_check();
}

View File

@ -246,9 +246,14 @@ static int coherency_type(void)
return type;
}
/*
* As a precaution, we currently completely disable hardware I/O
* coherency, until enough testing is done with automatic I/O
* synchronization barriers to validate that it is a proper solution.
*/
int coherency_available(void)
{
return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
return false;
}
int __init coherency_init(void)

View File

@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
if (addr < USER_PGTABLES_CEILING)
return;
if (!st->level) {
st->level = level;
st->current_prot = prot;
@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
pgd_t *pgd = swapper_pg_dir;
struct pg_state st;
unsigned long addr;
unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
unsigned i;
memset(&st, 0, sizeof(st));
st.seq = m;
st.marker = address_markers;
pgd += pgdoff;
for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);

View File

@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
.mask = ~PMD_SECT_RDONLY,
.prot = PMD_SECT_RDONLY,
.mask = ~L_PMD_SECT_RDONLY,
.prot = L_PMD_SECT_RDONLY,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,

View File

@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {

View File

@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_CGROUP_HUGETLB=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HMC_DRV is not set
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_PL061=y
@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y

View File

@ -21,6 +21,7 @@
#include <asm/barrier.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>

View File

@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
u32 reg_id_dfr0;
u32 reg_id_isar0;
u32 reg_id_isar1;
u32 reg_id_isar2;
@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
u32 reg_id_mmfr3;
u32 reg_id_pfr0;
u32 reg_id_pfr1;
u32 reg_mvfr0;
u32 reg_mvfr1;
u32 reg_mvfr2;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);

View File

@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
static inline int set_arch_dma_coherent_ops(struct device *dev)
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
dev->archdata.dma_coherent = true;
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return 0;
dev->archdata.dma_coherent = coherent;
if (coherent)
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)

View File

@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#define pud_page(pud) pmd_page(pud_pmd(pud))
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))

View File

@ -31,6 +31,7 @@
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@ -123,9 +124,6 @@ struct task_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()

View File

@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
#define __NR_compat_syscalls 386
#define __NR_compat_syscalls 387
#endif
#define __ARCH_WANT_SYS_CLONE

View File

@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
* If we have AArch32, we care about 32-bit features for compat. These
* registers should be RES0 otherwise.
*/
diff |= CHECK(id_dfr0, boot, cur, cpu);
diff |= CHECK(id_isar0, boot, cur, cpu);
diff |= CHECK(id_isar1, boot, cur, cpu);
diff |= CHECK(id_isar2, boot, cur, cpu);
@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(id_pfr0, boot, cur, cpu);
diff |= CHECK(id_pfr1, boot, cur, cpu);
diff |= CHECK(mvfr0, boot, cur, cpu);
diff |= CHECK(mvfr1, boot, cur, cpu);
diff |= CHECK(mvfr2, boot, cur, cpu);
/*
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();

View File

@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
efi_setup_idmap();
early_memunmap(memmap.map, memmap.map_end - memmap.map);
}
static int __init remap_region(efi_memory_desc_t *md, void **new)
@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
}
mapsize = memmap.map_end - memmap.map;
early_memunmap(memmap.map, mapsize);
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");

View File

@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

View File

@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}

View File

@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
request_standard_resources();
efi_idmap_init();
early_ioremap_reset();
unflatten_device_tree();

View File

@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
#include <asm/io.h>
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);

View File

@ -5,6 +5,7 @@
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
/*
* We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in
* TTBR0_EL1 unless the active_mm == &init_mm, in which case
* the thread entered __cpu_suspend with TTBR0_EL1 set to
* reserved TTBR0 page tables and should be restored as such.
*/
if (mm == &init_mm)
cpu_set_reserved_ttbr0();
else
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
/*

View File

@ -7,6 +7,7 @@
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

View File

@ -11,7 +11,7 @@
#define NR_syscalls 318 /* length of syscall table */
#define NR_syscalls 319 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about

View File

@ -331,5 +331,6 @@
#define __NR_getrandom 1339
#define __NR_memfd_create 1340
#define __NR_bpf 1341
#define __NR_execveat 1342
#endif /* _UAPI_ASM_IA64_UNISTD_H */

View File

@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
EXPORT_SYMBOL(acpi_map_lsapic);
EXPORT_SYMBOL(acpi_map_cpu);
int acpi_unmap_lsapic(int cpu)
int acpi_unmap_cpu(int cpu)
{
ia64_cpu_to_sapicid[cpu] = -1;
set_cpu_present(cpu, false);
@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
EXPORT_SYMBOL(acpi_unmap_lsapic);
EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
#ifdef CONFIG_ACPI_NUMA

View File

@ -1779,6 +1779,7 @@ sys_call_table:
data8 sys_getrandom
data8 sys_memfd_create // 1340
data8 sys_bpf
data8 sys_execveat
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */

View File

@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
err_cpu("DIV");

View File

@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
need_resched:
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
movia r4, PREEMPT_ACTIVE
stw r4, TI_PREEMPT_COUNT(r1)
rdctl r10, status /* enable intrs again */
ori r10, r10 ,STATUS_PIE
wrctl status, r10
PUSH r1
call schedule
POP r1
mov r4, r0
stw r4, TI_PREEMPT_COUNT(r1)
rdctl r10, status /* disable intrs */
andi r10, r10, %lo(~STATUS_PIE)
wrctl status, r10
br need_resched
#else
br restore_all
call preempt_schedule_irq
#endif
br restore_all
/***********************************************************************
* A few syscall wrappers

View File

@ -33,11 +33,18 @@
#endif /*!CONFIG_PA20*/
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
fails to find a register in class R1_REGS when "a" needs to be
reloaded when generating 64-bit PIC code. Instead, we clobber
memory to indicate to the compiler that the assembly code reads
or writes to items other than those listed in the input and output
operands. This may pessimize the code somewhat but __ldcw is
usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({ \
unsigned __ret; \
__asm__ __volatile__(__LDCW " 0(%2),%0" \
: "=r" (__ret), "+m" (*(a)) : "r" (a)); \
__asm__ __volatile__(__LDCW " 0(%1),%0" \
: "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})
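
To make the macro's contract concrete, here is a minimal kernel-style sketch of the usual consumer (hypothetical helper names, not the actual arch/parisc spinlock code): LDCW atomically loads the lock word and zeroes it, so a nonzero return value means the lock was taken, and the "memory" clobber above is what keeps the compiler from caching *a across the spin loop.

	static inline void example_spin_lock(volatile unsigned int *a)
	{
		while (__ldcw(a) == 0)		/* 0 => lock already held */
			while (*a == 0)		/* read-only wait, no LDCW traffic */
				cpu_relax();
	}

	static inline void example_spin_unlock(volatile unsigned int *a)
	{
		mb();				/* order the critical section */
		*a = 1;				/* nonzero => unlocked */
	}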

View File

@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
}
#else /* !CONFIG_KEXEC */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
return 0;
}
static inline bool kdump_in_progress(void)
{
return false;
}
#endif /* CONFIG_KEXEC */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */

View File

@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)

View File

@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
#define __NR_syscalls 362
#define __NR_syscalls 363
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls

View File

@ -384,5 +384,6 @@
#define __NR_getrandom 359
#define __NR_memfd_create 360
#define __NR_bpf 361
#define __NR_execveat 362
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

View File

@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
* using debugger IPI.
*/
if (crashing_cpu == -1)
if (!kdump_in_progress())
kexec_prepare_cpus();
pr_debug("kexec: Starting switchover sequence.\n");

View File

@ -700,6 +700,7 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@ -738,14 +739,6 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/*
* CPU must be marked active and online before we signal back to the
* master, because the scheduler needs to see the cpu_online and
* cpu_active bits set.
*/
smp_wmb();
cpu_callin_map[cpu] = 1;
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);

View File

@ -43,6 +43,7 @@
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include "pseries.h"
@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
* out to the user, but at least this will stop us from
* continuing on further and creating an even more
* difficult to debug situation.
*
* There is a known problem when kdump'ing, if cpus are offline
* the above call will fail. Rather than panicking again, keep
* going and hope the kdump kernel is also little endian, which
* it usually is.
*/
if (rc)
if (rc && !kdump_in_progress())
panic("Could not enable big endian exceptions");
}
#endif

View File

@ -3,6 +3,7 @@ config UML
default y
select HAVE_ARCH_AUDITSYSCALL
select HAVE_UID16
select HAVE_FUTEX_CMPXCHG if FUTEX
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO

View File

@ -51,6 +51,7 @@ targets += cpustr.h
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
$(call if_changed,cpustr)
endif
clean-files += cpustr.h
# ---------------------------------------------------------------------------

View File

@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@ -46,6 +45,7 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
endif
aes-i586-y := aes-i586-asm_32.o aes_glue.o

View File

@ -208,7 +208,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
vmovdqa 3*16(p_keys), xkeyA
vmovdqa 3*16(p_keys), xkey4
.endif
.else
vmovdqa 3*16(p_keys), xkeyA
@ -224,7 +224,7 @@ ddq_add_8:
add $(16*by), p_in
.if (klen == KEY_128)
vmovdqa 4*16(p_keys), xkey4
vmovdqa 4*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 4*16(p_keys), xkey4
@ -234,7 +234,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
/* key 3 */
.if (klen == KEY_128)
vaesenc xkey4, var_xdata, var_xdata
.else
vaesenc xkeyA, var_xdata, var_xdata
.endif
.set i, (i +1)
.endr
@ -243,13 +248,18 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
vaesenc xkey4, var_xdata, var_xdata /* key 4 */
/* key 4 */
.if (klen == KEY_128)
vaesenc xkeyB, var_xdata, var_xdata
.else
vaesenc xkey4, var_xdata, var_xdata
.endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
.if (load_keys)
vmovdqa 6*16(p_keys), xkeyB
vmovdqa 6*16(p_keys), xkey8
.endif
.else
vmovdqa 6*16(p_keys), xkeyB
@ -267,12 +277,17 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
/* key 6 */
.if (klen == KEY_128)
vaesenc xkey8, var_xdata, var_xdata
.else
vaesenc xkeyB, var_xdata, var_xdata
.endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
vmovdqa 8*16(p_keys), xkey8
vmovdqa 8*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 8*16(p_keys), xkey8
@ -288,7 +303,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
vmovdqa 9*16(p_keys), xkeyA
vmovdqa 9*16(p_keys), xkey12
.endif
.else
vmovdqa 9*16(p_keys), xkeyA
@ -297,7 +312,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
vaesenc xkey8, var_xdata, var_xdata /* key 8 */
/* key 8 */
.if (klen == KEY_128)
vaesenc xkeyB, var_xdata, var_xdata
.else
vaesenc xkey8, var_xdata, var_xdata
.endif
.set i, (i +1)
.endr
@ -306,7 +326,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
/* key 9 */
.if (klen == KEY_128)
vaesenc xkey12, var_xdata, var_xdata
.else
vaesenc xkeyA, var_xdata, var_xdata
.endif
.set i, (i +1)
.endr
@ -412,7 +437,6 @@ ddq_add_8:
/* main body of aes ctr load */
.macro do_aes_ctrmain key_len
cmp $16, num_bytes
jb .Ldo_return2\key_len

View File

@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
/*
* Load per CPU data from GDT. LSL is faster than RDTSCP and
* works on all CPUs.
* works on all CPUs. This is volatile so that it orders
* correctly wrt barrier() and to keep gcc from cleverly
* hoisting it out of the calling function.
*/
asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
return p;
}
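
A hypothetical fragment illustrating the hazard the volatile closes: in a vDSO-style seqcount loop the CPU number must be re-read inside the protected section, but a non-volatile asm with unchanging inputs looks like a pure function to gcc and may be hoisted out of the loop, pairing a stale CPU number with fresh per-CPU data. The struct and field names below are stand-ins, not real vDSO symbols.

	struct example_shared {
		unsigned int seq;		/* hypothetical seqcount */
	};

	static u64 example_percpu_read(struct example_shared *shared,
				       const u64 *percpu_val)
	{
		unsigned int seq, cpu;
		u64 v;

		do {
			seq = READ_ONCE(shared->seq);
			barrier();
			/* must be re-evaluated inside the loop on every retry */
			cpu = __getcpu() & VGETCPU_CPU_MASK;
			v = percpu_val[cpu];
			barrier();
		} while (seq != READ_ONCE(shared->seq));

		return v;
	}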

View File

@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
EXPORT_SYMBOL(acpi_map_lsapic);
EXPORT_SYMBOL(acpi_map_cpu);
int acpi_unmap_lsapic(int cpu)
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
EXPORT_SYMBOL(acpi_unmap_lsapic);
EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)

View File

@ -66,3 +66,4 @@ targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags)
endif
clean-files += capflags.c

View File

@ -28,7 +28,7 @@ function dump_array()
# If the /* comment */ starts with a quote string, grab that.
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
[ -z "$VALUE" ] && VALUE="\"$NAME\""
[ "$VALUE" == '""' ] && continue
[ "$VALUE" = '""' ] && continue
# Name is uppercase, VALUE is all lowercase
VALUE="$(echo "$VALUE" | tr A-Z a-z)"

View File

@ -17,7 +17,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
#define UNCORE_EXTRA_PCI_DEV_MAX 2
#define UNCORE_EXTRA_PCI_DEV_MAX 3
/* support up to 8 sockets */
#define UNCORE_SOCKET_MAX 8

View File

@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
{
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
/* Detect 6-8 core systems with only two SBOXes */
if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
u32 capid4;
pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
0x94, &capid4);
if (((capid4 >> 6) & 0x3) == 0)
hswep_uncore_sbox.num_boxes = 2;
}
uncore_msr_uncores = hswep_msr_uncores;
}
@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
HSWEP_PCI_PCU_3),
},
{ /* end: all zeroes */ }
};

View File

@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
(1ULL << PERF_REG_X86_ES) | \
@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs,
struct pt_regs *regs_user_copy)
{
struct pt_regs *user_regs = task_pt_regs(current);
/*
* If we're in an NMI that interrupted task_pt_regs setup, then
* we can't sample user regs at all. This check isn't really
* sufficient, though, as we could be in an NMI inside an interrupt
* that happened during task_pt_regs setup.
*/
if (regs->sp > (unsigned long)&user_regs->r11 &&
regs->sp <= (unsigned long)(user_regs + 1)) {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
return;
}
/*
* RIP, flags, and the argument registers are usually saved.
* orig_ax is probably okay, too.
*/
regs_user_copy->ip = user_regs->ip;
regs_user_copy->cx = user_regs->cx;
regs_user_copy->dx = user_regs->dx;
regs_user_copy->si = user_regs->si;
regs_user_copy->di = user_regs->di;
regs_user_copy->r8 = user_regs->r8;
regs_user_copy->r9 = user_regs->r9;
regs_user_copy->r10 = user_regs->r10;
regs_user_copy->r11 = user_regs->r11;
regs_user_copy->orig_ax = user_regs->orig_ax;
regs_user_copy->flags = user_regs->flags;
/*
* Don't even try to report the "rest" regs.
*/
regs_user_copy->bx = -1;
regs_user_copy->bp = -1;
regs_user_copy->r12 = -1;
regs_user_copy->r13 = -1;
regs_user_copy->r14 = -1;
regs_user_copy->r15 = -1;
/*
* For this to be at all useful, we need a reasonable guess for
* sp and the ABI. Be careful: we're in NMI context, and we're
* considering current to be the current task, so we should
* be careful not to look at any other percpu variables that might
* change during context switches.
*/
if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
task_thread_info(current)->status & TS_COMPAT) {
/* Easy case: we're in a compat syscall. */
regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
regs_user_copy->sp = user_regs->sp;
regs_user_copy->cs = user_regs->cs;
regs_user_copy->ss = user_regs->ss;
} else if (user_regs->orig_ax != -1) {
/*
* We're probably in a 64-bit syscall.
* Warning: this code is severely racy. At least it's better
* than just blindly copying user_regs.
*/
regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
regs_user_copy->sp = this_cpu_read(old_rsp);
regs_user_copy->cs = __USER_CS;
regs_user_copy->ss = __USER_DS;
regs_user_copy->cx = -1; /* usually contains garbage */
} else {
/* We're probably in an interrupt or exception. */
regs_user->abi = user_64bit_mode(user_regs) ?
PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
regs_user_copy->sp = user_regs->sp;
regs_user_copy->cs = user_regs->cs;
regs_user_copy->ss = user_regs->ss;
}
regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */

View File

@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
* zap all shadow pages.
*/
if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}

View File

@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
/* According to the SDM, in x2apic mode the whole id reg is used.
* But KVM only uses the highest eight bits, so we need to
* intercept it. */
vmx_enable_intercept_msr_read_x2apic(0x802);
/* TMCCT */
vmx_enable_intercept_msr_read_x2apic(0x839);
/* TPR */
vmx_disable_intercept_msr_write_x2apic(0x808);
/* EOI */
vmx_disable_intercept_msr_write_x2apic(0x80b);
/* SELF-IPI */
vmx_disable_intercept_msr_write_x2apic(0x83f);
}
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
update_ple_window_actual_max();
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out7;
}
}
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
if (nested)
nested_vmx_setup_ctls_msrs();
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
/* According to the SDM, in x2apic mode the whole id reg is used.
* But KVM only uses the highest eight bits, so we need to
* intercept it. */
vmx_enable_intercept_msr_read_x2apic(0x802);
/* TMCCT */
vmx_enable_intercept_msr_read_x2apic(0x839);
/* TPR */
vmx_disable_intercept_msr_write_x2apic(0x808);
/* EOI */
vmx_disable_intercept_msr_write_x2apic(0x80b);
/* SELF-IPI */
vmx_disable_intercept_msr_write_x2apic(0x83f);
}
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
update_ple_window_actual_max();
return alloc_kvm_area();
out7:

View File

@ -28,7 +28,7 @@
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
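
The fix is a classic one-past-the-end boundary condition: end_kaddr points just past the last valid byte, so a read of sizeof(t) + n bytes starting at next_byte is in bounds exactly when the sum equals end_kaddr as well. A tiny standalone check of the arithmetic:

	#include <assert.h>

	int main(void)
	{
		unsigned char buf[4];
		unsigned char *next_byte = buf + 2;
		unsigned char *end_kaddr = buf + sizeof(buf); /* one past end */

		/* Reading 2 bytes at buf[2..3] stays inside the buffer. */
		assert(next_byte + 2 <= end_kaddr);	/* new check: accepts */
		assert(!(next_byte + 2 < end_kaddr));	/* old check: rejected */
		return 0;
	}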

View File

@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
static unsigned long __init get_new_step_size(unsigned long step_size)
{
/*
* Explain why we shift by 5 and why we don't have to worry about
* 'step_size << 5' overflowing:
*
* initial mapped size is PMD_SIZE (2M).
* Initial mapped size is PMD_SIZE (2M).
* We can not set step_size to be PUD_SIZE (1G) yet.
In the worst case, when we cross the 1G boundary, and
* PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
* to map 1G range with PTE. Use 5 as shift for now.
* to map 1G range with PTE. Hence we use one less than the
* difference of page table level shifts.
*
* Don't need to worry about overflow, on 32bit, when step_size
* is 0, round_down() returns 0 for start, and that turns it
* into 0x100000000ULL.
* Don't need to worry about overflow in the top-down case, on 32bit,
* when step_size is 0, round_down() returns 0 for start, and that
* turns it into 0x100000000ULL.
* In the bottom-up case, round_up(x, 0) likewise returns 0, which
* needs to be taken into consideration by the code below.
*/
return step_size << 5;
return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
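
Working the arithmetic once (assuming 4K pages and 2M PMDs, i.e. PAGE_SHIFT = 12 and PMD_SHIFT = 21): the shift is 21 - 12 - 1 = 8, so each step now grows 256x rather than the old fixed 32x. A standalone sketch of the progression:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long step = 2ULL << 20;	/* PMD_SIZE: 2M */
		int i;

		for (i = 0; i < 3; i++) {
			printf("step %d: %llu MiB\n", i, step >> 20);
			step <<= (21 - 12 - 1);		/* get_new_step_size() */
		}
		return 0;				/* 2M, 512M, 128G */
	}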
/**
@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
unsigned long step_size;
unsigned long addr;
unsigned long mapped_ram_size = 0;
unsigned long new_mapped_ram_size;
/* xen has big range in reserved near end of ram, skip it at first.*/
addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
start = map_start;
} else
start = map_start;
new_mapped_ram_size = init_range_memory_mapping(start,
mapped_ram_size += init_range_memory_mapping(start,
last_start);
last_start = start;
min_pfn_mapped = last_start >> PAGE_SHIFT;
/* only increase step_size after big range get mapped */
if (new_mapped_ram_size > mapped_ram_size)
if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
mapped_ram_size += new_mapped_ram_size;
}
if (real_end < map_end)
@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
static void __init memory_map_bottom_up(unsigned long map_start,
unsigned long map_end)
{
unsigned long next, new_mapped_ram_size, start;
unsigned long next, start;
unsigned long mapped_ram_size = 0;
/* step_size need to be small so pgt_buf from BRK could cover it */
unsigned long step_size = PMD_SIZE;
@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
* for page table.
*/
while (start < map_end) {
if (map_end - start > step_size) {
if (step_size && map_end - start > step_size) {
next = round_up(start + 1, step_size);
if (next > map_end)
next = map_end;
} else
} else {
next = map_end;
}
new_mapped_ram_size = init_range_memory_mapping(start, next);
mapped_ram_size += init_range_memory_mapping(start, next);
start = next;
if (new_mapped_ram_size > mapped_ram_size)
if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
mapped_ram_size += new_mapped_ram_size;
}
}

View File

@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void);
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.

View File

@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.

View File

@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
struct linux_binprm;
/* Put the vdso above the (randomized) stack with another randomized offset.
This way there is no hole in the middle of address space.
To save memory make sure it is still in the same PTE as the stack top.
This doesn't give that many random bits.
Only used for the 64-bit and x32 vdsos. */
/*
* Put the vdso above the (randomized) stack with another randomized
* offset. This way there is no hole in the middle of address space.
* To save memory make sure it is still in the same PTE as the stack
* top. This doesn't give that many random bits.
*
* Note that this algorithm is imperfect: the distribution of the vdso
* start address within a PMD is biased toward the end.
*
* Only used for the 64-bit and x32 vdsos.
*/
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
#else
unsigned long addr, end;
unsigned offset;
end = (start + PMD_SIZE - 1) & PMD_MASK;
/*
* Round up the start address. It can start out unaligned as a result
* of stack start randomization.
*/
start = PAGE_ALIGN(start);
/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;
/* This loses some more bits than a modulo, but is cheaper */
offset = get_random_int() & (PTRS_PER_PTE - 1);
addr = start + (offset << PAGE_SHIFT);
if (addr >= end)
addr = end;
if (end > start) {
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
}
/*
* page-align it here so that get_unmapped_area doesn't
* align it wrongfully again to the next page. addr can come in 4K
* unaligned here as a result of stack start randomization.
* Forcibly align the final address in case we have a hardware
* issue that requires alignment for performance reasons.
*/
addr = PAGE_ALIGN(addr);
addr = align_vdso_addr(addr);
return addr;

View File

@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
{
struct af_alg_completion *completion = req->data;
if (err == -EINPROGRESS)
return;
completion->err = err;
complete(&completion->completion);
}

View File

@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
obj-y += tty/
obj-y += char/
# gpu/ comes after char for AGP vs DRM startup
# iommu/ comes before gpu as gpu are using iommu controllers
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/
obj-$(CONFIG_CONNECTOR) += connector/
@ -141,7 +144,6 @@ obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/

View File

@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
if (pr->apic_id == -1)
if (pr->phys_id == -1)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
if (ret)
goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
acpi_unmap_lsapic(pr->id);
acpi_unmap_cpu(pr->id);
goto out;
}
@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
int apic_id, cpu_index, device_declaration = 0;
int phys_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
if (apic_id < 0)
acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
pr->apic_id = apic_id;
phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
if (phys_id < 0)
acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
pr->phys_id = phys_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
/*
* Handle UP system running SMP kernel, with no CPU
* entry in MADT
*/
if ((cpu_index == -1) && (num_online_cpus() == 1))
cpu_index = 0;
}
@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Remove the CPU. */
arch_unregister_cpu(pr->id);
acpi_unmap_lsapic(pr->id);
acpi_unmap_cpu(pr->id);
cpu_hotplug_done();
cpu_maps_update_done();

View File

@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device))
return 0;
return -ENXIO;
result = acpi_device_get_power(device, &state);
if (result)

View File

@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
unsigned long madt_end, entry;
static struct acpi_table_madt *madt;
static int read_madt;
int apic_id = -1;
int phys_id = -1; /* CPU hardware ID */
if (!read_madt) {
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
}
if (!madt)
return apic_id;
return phys_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
if (!map_lapic_id(header, acpi_id, &apic_id))
if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
if (!map_x2apic_id(header, type, acpi_id, &apic_id))
if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
if (!map_lsapic_id(header, type, acpi_id, &apic_id))
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
}
return apic_id;
return phys_id;
}
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
int apic_id = -1;
int phys_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
map_lapic_id(header, acpi_id, &apic_id);
map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
map_lsapic_id(header, type, acpi_id, &apic_id);
map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &apic_id);
map_x2apic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return apic_id;
return phys_id;
}
int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
int apic_id;
int phys_id;
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
phys_id = map_mat_entry(handle, type, acpi_id);
if (phys_id == -1)
phys_id = map_madt_entry(type, acpi_id);
return apic_id;
return phys_id;
}
int acpi_map_cpuid(int apic_id, u32 acpi_id)
int acpi_map_cpuid(int phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
if (apic_id == -1) {
if (phys_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
* So above apic_id is always set to -1.
* So above phys_id is always set to -1.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
* Ignores apic_id and always returns 0 for the processor
* Ignores phys_id and always returns 0 for the processor
* handle with acpi id 0 if nr_cpu_ids is 1.
* This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return apic_id;
return phys_id;
}
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
if (cpu_physical_id(i) == phys_id)
return i;
}
#else
/* In UP kernel, only processor 0 is valid */
if (apic_id == 0)
return apic_id;
if (phys_id == 0)
return phys_id;
#endif
return -1;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
int apic_id;
int phys_id;
apic_id = acpi_get_apicid(handle, type, acpi_id);
phys_id = acpi_get_phys_id(handle, type, acpi_id);
return acpi_map_cpuid(apic_id, acpi_id);
return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);

View File

@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
if (cx->entry_method != ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_TIME_INVALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;

View File

@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
if (device->wakeup.flags.valid)
acpi_power_resources_list_free(&device->wakeup.resources);
if (!device->flags.power_manageable)
if (!device->power.flags.power_resources)
return;
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
if (acpi_bus_init_power(device)) {
acpi_free_power_resources_lists(device);
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}
}
static void acpi_bus_get_flags(struct acpi_device *device)
@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
device->flags.visited = false;
device->flags.power_manageable = 0;
return;
}
if (device->handler)
goto ok;
if (!device->flags.initialized) {
acpi_bus_update_power(device, NULL);
device->flags.power_manageable =
device->power.states[ACPI_STATE_D0].flags.valid;
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
device->flags.initialized = true;
}
device->flags.visited = false;

View File

@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
{
.callback = video_disable_native_backlight,
.ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
},
},
{
.callback = video_disable_native_backlight,
.ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
.callback = video_disable_native_backlight,
.ident = "Dell XPS15 L521X",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
},
},
{}
};

View File

@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
static struct generic_pm_domain *of_genpd_get_from_provider(
struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.

View File

@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
rcu_lockdep_assert(rcu_read_lock_held() || \
lockdep_is_held(&dev_opp_list_lock), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
} while (0)
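
A hedged caller-side sketch of the contract this assertion enforces (the example function name is hypothetical): the find functions must run inside an RCU read-side section (or with dev_opp_list_lock held), and the returned OPP pointer must not be used after rcu_read_unlock().

	#include <linux/err.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	static unsigned long example_opp_voltage(struct device *dev,
						 unsigned long freq)
	{
		struct dev_pm_opp *opp;
		unsigned long volt = 0;

		rcu_read_lock();			/* satisfies the assert */
		opp = dev_pm_opp_find_freq_exact(dev, freq, true);
		if (!IS_ERR(opp))
			volt = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();			/* opp unusable past here */

		return volt;
	}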
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
* Locking: This function must be called under rcu_read_lock(). This function
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
* Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
struct dev_pm_opp *temp_opp;
int count = 0;
rcu_read_lock();
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
return r;
count = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n",
__func__, count);
goto out_unlock;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
count++;
}
out_unlock:
rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
PTR_ERR(dev_opp)))
if (IS_ERR(dev_opp)) {
int error = PTR_ERR(dev_opp);
if (error != -ENODEV)
WARN(1, "%s: dev_opp: %d\n",
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
return;
}
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);

View File

@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
}
/* Checks whether the given window number is available */
/* On Armada XP, 375 and 38x the MBus window 13 has the remap
* capability, like windows 0 to 7. However, the mvebu-mbus driver
* doesn't currently take this special case into account, which means
* that when window 13 is actually used, the remap registers are left
* at 0, making the device that uses this MBus window unavailable. The
* quick fix for stable is to not use window 13. A follow up patch
* will correctly handle this window.
*/
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
const int win)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl = readl(addr + WIN_CTRL_OFF);
if (win == 13)
return false;
return !(ctrl & WIN_CTRL_ENABLE);
}

View File

@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");

View File

@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");

View File

@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");

View File

@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
__setup("agp=", agp_setup);
#endif
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);

View File

@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");


@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
}
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");


@ -1,7 +1,7 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
* to work in 2.5 by Dave Jones <davej@redhat.com>
* to work in 2.5 by Dave Jones.
*/
#include <linux/module.h>


@ -595,4 +595,4 @@ module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones");


@ -199,18 +199,6 @@ struct bmc_device {
int guid_set;
char name[16];
struct kref usecount;
/* bmc device attributes */
struct device_attribute device_id_attr;
struct device_attribute provides_dev_sdrs_attr;
struct device_attribute revision_attr;
struct device_attribute firmware_rev_attr;
struct device_attribute version_attr;
struct device_attribute add_dev_support_attr;
struct device_attribute manufacturer_id_attr;
struct device_attribute product_id_attr;
struct device_attribute guid_attr;
struct device_attribute aux_firmware_rev_attr;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
&dev_attr_aux_firmware_revision);
if (bmc->guid_set)
device_remove_file(&bmc->pdev.dev,
&bmc->guid_attr);
&dev_attr_guid);
platform_device_unregister(&bmc->pdev);
}
@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
int err;
if (bmc->id.aux_firmware_revision_set) {
bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
err = device_create_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
&dev_attr_aux_firmware_revision);
if (err)
goto out;
}
if (bmc->guid_set) {
bmc->guid_attr.attr.name = "guid";
err = device_create_file(&bmc->pdev.dev,
&bmc->guid_attr);
&dev_attr_guid);
if (err)
goto out_aux_firm;
}
@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
&dev_attr_aux_firmware_revision);
out:
return err;
}
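
The conversion above replaces the per-instance struct device_attribute fields with file-scope attributes. For reference, static DEVICE_ATTR(name, mode, show, store) from <linux/device.h> expands to roughly the following (a sketch, not part of this diff):

	static struct device_attribute dev_attr_device_id = {
		.attr  = { .name = "device_id", .mode = S_IRUGO },
		.show  = device_id_show,
		.store = NULL,
	};

which is why the create/remove paths can now pass the shared &dev_attr_* objects instead of filling in .attr.name by hand on every bmc_device.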


@ -52,6 +52,7 @@
#include <linux/dmi.h>
#include <linux/kthread.h>
#include <linux/acpi.h>
#include <linux/ctype.h>
#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"
@ -968,7 +969,8 @@ static void sender(void *send_info,
do_gettimeofday(&t);
pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
msg->data[0], msg->data[1],
(long) t.tv_sec, (long) t.tv_usec);
}
}


@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
if (arch_timer_use_virtual)
if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;


@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
/*
* But we need OPP table to function so if it is not there let's
* give platform code chance to provide it for us.
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
pr_debug("OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
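
The -EPROBE_DEFER return added above leans on the driver core's deferred-probe machinery: a probe routine that cannot find a dependency yet may bail out with that code and is re-probed after other drivers bind. A minimal sketch of the contract, with example_probe a hypothetical driver:

	static int example_probe(struct platform_device *pdev)
	{
		/* dependency (here, the OPP table) not registered yet */
		if (dev_pm_opp_get_opp_count(&pdev->dev) <= 0)
			return -EPROBE_DEFER;	/* core retries later */
		return 0;			/* normal bind */
	}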


@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
/*
* Governor might not be initiated here if ACPI _PPC changed
* notification happened, so check it.
*/
if (!policy->governor)
return -EINVAL;
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >


@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
last_residency = cpuidle_get_last_residency(dev) - \
drv->states[last_idx].exit_latency;
}
else
last_residency = last_state->threshold.promotion_time + 1;
last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&


@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* power state and occurrence of the wakeup event.
*
* If the entered idle state didn't support residency measurements,
* we are basically lost in the dark how much time passed.
* As a compromise, assume we slept for the whole expected time.
* we use them anyway if they are short, and if long,
* truncate to the whole expected time.
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
@ -405,23 +405,18 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
/* Use timer value as is */
/* measured value */
measured_us = cpuidle_get_last_residency(dev);
/* Deduct exit latency */
if (measured_us > target->exit_latency)
measured_us -= target->exit_latency;
/* Make sure our coefficients do not exceed unity */
if (measured_us > data->next_timer_us)
measured_us = data->next_timer_us;
} else {
/* Use measured value */
measured_us = cpuidle_get_last_residency(dev);
/* Deduct exit latency */
if (measured_us > target->exit_latency)
measured_us -= target->exit_latency;
/* Make sure our coefficients do not exceed unity */
if (measured_us > data->next_timer_us)
measured_us = data->next_timer_us;
}
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;


@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/


@ -31,7 +31,6 @@
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
@ -121,27 +120,20 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
process->is_32bit_user_mode = is_32bit_user_mode;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
kfd_init_apertures(process);
return 0;
}
static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void __user *arg)
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_get_version_args args;
struct kfd_ioctl_get_version_args *args = data;
int err = 0;
args.major_version = KFD_IOCTL_MAJOR_VERSION;
args.minor_version = KFD_IOCTL_MINOR_VERSION;
if (copy_to_user(arg, &args, sizeof(args)))
err = -EFAULT;
args->major_version = KFD_IOCTL_MAJOR_VERSION;
args->minor_version = KFD_IOCTL_MINOR_VERSION;
return err;
}
@ -225,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return 0;
}
static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
void __user *arg)
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_create_queue_args args;
struct kfd_ioctl_create_queue_args *args = data;
struct kfd_dev *dev;
int err = 0;
unsigned int queue_id;
@ -237,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;
pr_debug("kfd: creating queue ioctl\n");
err = set_queue_properties_from_user(&q_properties, &args);
err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
dev = kfd_device_by_id(args.gpu_id);
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@ -254,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
err = -ESRCH;
goto err_bind_process;
}
@ -267,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err != 0)
goto err_create_queue;
args.queue_id = queue_id;
args->queue_id = queue_id;
/* Return gpu_id as doorbell offset for mmap usage */
args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
if (copy_to_user(arg, &args, sizeof(args))) {
err = -EFAULT;
goto err_copy_args_out;
}
args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
mutex_unlock(&p->mutex);
pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
pr_debug("ring buffer address == 0x%016llX\n",
args.ring_base_address);
args->ring_base_address);
pr_debug("read ptr address == 0x%016llX\n",
args.read_pointer_address);
args->read_pointer_address);
pr_debug("write ptr address == 0x%016llX\n",
args.write_pointer_address);
args->write_pointer_address);
return 0;
err_copy_args_out:
pqm_destroy_queue(&p->pqm, queue_id);
err_create_queue:
err_bind_process:
mutex_unlock(&p->mutex);
@ -301,99 +283,90 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
}
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
void __user *arg)
void *data)
{
int retval;
struct kfd_ioctl_destroy_queue_args args;
if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;
struct kfd_ioctl_destroy_queue_args *args = data;
pr_debug("kfd: destroying queue id %d for PASID %d\n",
args.queue_id,
args->queue_id,
p->pasid);
mutex_lock(&p->mutex);
retval = pqm_destroy_queue(&p->pqm, args.queue_id);
retval = pqm_destroy_queue(&p->pqm, args->queue_id);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
void __user *arg)
void *data)
{
int retval;
struct kfd_ioctl_update_queue_args args;
struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;
if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
if ((args.ring_base_address) &&
if ((args->ring_base_address) &&
(!access_ok(VERIFY_WRITE,
(const void __user *) args.ring_base_address,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("kfd: can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("kfd: ring size must be a power of 2 or 0\n");
return -EINVAL;
}
properties.queue_address = args.ring_base_address;
properties.queue_size = args.ring_size;
properties.queue_percent = args.queue_percentage;
properties.priority = args.queue_priority;
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
pr_debug("kfd: updating queue id %d for PASID %d\n",
args.queue_id, p->pasid);
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
return retval;
}
static long kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void __user *arg)
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_set_memory_policy_args args;
struct kfd_ioctl_set_memory_policy_args *args = data;
struct kfd_dev *dev;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;
if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;
if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
dev = kfd_device_by_id(args.gpu_id);
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@ -401,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
err = -ESRCH;
goto out;
}
default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
alternate_policy =
(args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!dev->dqm->set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
(void __user *)args.alternate_aperture_base,
args.alternate_aperture_size))
(void __user *)args->alternate_aperture_base,
args->alternate_aperture_size))
err = -EINVAL;
out:
@ -426,53 +399,44 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
return err;
}
static long kfd_ioctl_get_clock_counters(struct file *filep,
struct kfd_process *p, void __user *arg)
static int kfd_ioctl_get_clock_counters(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_clock_counters_args args;
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
struct timespec time;
if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;
dev = kfd_device_by_id(args.gpu_id);
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic(&time);
args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
get_monotonic_boottime(&time);
args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
/* Since the counter is in nano-seconds we use 1GHz frequency */
args.system_clock_freq = 1000000000;
if (copy_to_user(arg, &args, sizeof(args)))
return -EFAULT;
args->system_clock_freq = 1000000000;
return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process *p, void __user *arg)
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_process_apertures_args args;
struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;
args.num_of_nodes = 0;
args->num_of_nodes = 0;
mutex_lock(&p->mutex);
@ -481,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
/* Run over all pdd of the process */
pdd = kfd_get_first_process_device_data(p);
do {
pAperture = &args.process_apertures[args.num_of_nodes];
pAperture =
&args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
@ -491,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
pAperture->scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
"node id %u\n", args.num_of_nodes);
"node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
@ -507,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
args.num_of_nodes++;
args->num_of_nodes++;
} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
(args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
if (copy_to_user(arg, &args, sizeof(args)))
return -EFAULT;
return 0;
}
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
kfd_ioctl_get_version, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
kfd_ioctl_create_queue, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
kfd_ioctl_destroy_queue, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
kfd_ioctl_set_memory_policy, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
kfd_ioctl_get_clock_counters, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
kfd_ioctl_get_process_apertures, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
kfd_ioctl_update_queue, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
long err = -EINVAL;
amdkfd_ioctl_t *func;
const struct amdkfd_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128];
char *kdata = NULL;
unsigned int usize, asize;
int retcode = -EINVAL;
dev_dbg(kfd_device,
"ioctl cmd 0x%x (#%d), arg 0x%lx\n",
cmd, _IOC_NR(cmd), arg);
if (nr >= AMDKFD_CORE_IOCTL_COUNT)
goto err_i1;
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
ioctl = &amdkfd_ioctls[nr];
amdkfd_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
if (amdkfd_size > asize)
asize = amdkfd_size;
cmd = ioctl->cmd;
} else
goto err_i1;
dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
switch (cmd) {
case KFD_IOC_GET_VERSION:
err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
break;
case KFD_IOC_CREATE_QUEUE:
err = kfd_ioctl_create_queue(filep, process,
(void __user *)arg);
break;
case KFD_IOC_DESTROY_QUEUE:
err = kfd_ioctl_destroy_queue(filep, process,
(void __user *)arg);
break;
case KFD_IOC_SET_MEMORY_POLICY:
err = kfd_ioctl_set_memory_policy(filep, process,
(void __user *)arg);
break;
case KFD_IOC_GET_CLOCK_COUNTERS:
err = kfd_ioctl_get_clock_counters(filep, process,
(void __user *)arg);
break;
case KFD_IOC_GET_PROCESS_APERTURES:
err = kfd_ioctl_get_process_apertures(filep, process,
(void __user *)arg);
break;
case KFD_IOC_UPDATE_QUEUE:
err = kfd_ioctl_update_queue(filep, process,
(void __user *)arg);
break;
default:
dev_err(kfd_device,
"unknown ioctl cmd 0x%x, arg 0x%lx)\n",
cmd, arg);
err = -EINVAL;
break;
if (IS_ERR(process)) {
dev_dbg(kfd_device, "no process\n");
goto err_i1;
}
if (err < 0)
dev_err(kfd_device,
"ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
err, cmd, _IOC_NR(cmd));
/* Do not trust userspace, use our own definition */
func = ioctl->func;
return err;
if (unlikely(!func)) {
dev_dbg(kfd_device, "no function\n");
retcode = -EINVAL;
goto err_i1;
}
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
if (asize > usize)
memset(kdata + usize, 0, asize - usize);
}
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
retcode = -EFAULT;
goto err_i1;
}
} else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
}
retcode = func(filep, process, kdata);
if (cmd & IOC_OUT)
if (copy_to_user((void __user *)arg, kdata, usize) != 0)
retcode = -EFAULT;
err_i1:
if (!ioctl)
dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
dev_dbg(kfd_device, "ret = %d\n", retcode);
return retcode;
}
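
The dispatcher above is driven by the _IOC_* helpers from <asm-generic/ioctl.h>: _IOC_NR(cmd) yields the index into amdkfd_ioctls[], _IOC_SIZE(cmd) the encoded payload size, and the IOC_IN/IOC_OUT direction bits decide whether kdata is copied in from and/or back out to user space. A sketch of how one command number decodes (the magic and number here are illustrative, not quoted from kfd_ioctl.h):

	#define AMDKFD_IOC_GET_VERSION \
		_IOWR('K', 0x01, struct kfd_ioctl_get_version_args)

	/* _IOC_NR(cmd)   == 0x01                      (table index)    */
	/* _IOC_SIZE(cmd) == sizeof(the args struct)   (payload size)   */
	/* cmd & IOC_IN and cmd & IOC_OUT are both set (copy both ways) */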
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)


@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
{
int bit = qpd->vmid - KFD_VMID_START_OFFSET;
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
qpd->vmid = 0;
q->properties.vmid = 0;
@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
q->pipe,
q->queue);
retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
q->queue, q->properties.write_ptr);
if (retval != 0) {
deallocate_hqd(dqm, q);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
return retval;
}
return 0;
}
@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
bool prev_active = false;
BUG_ON(!dqm || !q || !q->mqd);
@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if (q->properties.is_active == true)
prev_active = true;
/*
*
* check active state vs. the previous state
* and modify counter accordingly
*/
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if ((q->properties.is_active == true) && (prev_active == false))
dqm->queue_count++;
else
else if ((q->properties.is_active == false) && (prev_active == true))
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
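
The prev_active snapshot matters because mqd->update_mqd() may itself flip q->properties.is_active; comparing the post-update value against the saved one means dqm->queue_count moves only on a genuine transition, roughly:

	/* sketch of the bookkeeping introduced above */
	bool prev_active = q->properties.is_active;	/* before update */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;			/* became active */
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;			/* became inactive */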


@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_dev *dev;
struct kfd_process_device *pdd;
mutex_lock(&process->mutex);
/*Iterating over all devices*/
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_get_process_device_data(dev, process, 1);
if (!pdd)
return -1;
/*
* For 64 bit process aperture will be statically reserved in
@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
id++;
}
mutex_unlock(&process->mutex);
return 0;
}


@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}


@ -32,7 +32,7 @@ int kfd_pasid_init(void)
{
pasid_limit = max_num_of_processes;
pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
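
The kzalloc-to-kcalloc change is a sizing fix: BITS_TO_LONGS(n) returns a count of longs, not bytes, so the old call undersized the bitmap by a factor of sizeof(long). Illustrative arithmetic, assuming pasid_limit = 512 on a 64-bit build:

	/* BITS_TO_LONGS(512) == 8                                   */
	/* old: kzalloc(8, GFP_KERNEL)        ->  8 bytes =  64 bits */
	/* new: kcalloc(8, sizeof(long), ...) -> 64 bytes = 512 bits */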


@ -463,6 +463,24 @@ struct kfd_process {
bool is_32bit_user_mode;
};
/**
* Ioctl function type.
*
* \param filep pointer to file structure.
* \param p amdkfd process pointer.
* \param data pointer to arg that was copied from user.
*/
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
void *data);
struct amdkfd_ioctl_desc {
unsigned int cmd;
int flags;
amdkfd_ioctl_t *func;
unsigned int cmd_drv;
const char *name;
};
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);


@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
struct mm_struct;
#include "kfd_priv.h"
@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_process_pqm_init;
/* init process apertures*/
process->is_32bit_user_mode = is_compat_task();
if (kfd_init_apertures(process) != 0)
goto err_init_apretures;
return process;
err_init_apretures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
synchronize_rcu();


@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
sysfs_show_32bit_prop(buffer, "engine_id",
dev->node_props.engine_id);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
sysfs_show_32bit_prop(buffer, "fw_version",
kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
}
ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@ -917,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
uint32_t i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
ret = kfd_build_sysfs_node_entry(dev, 0);
ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;


@ -45,6 +45,17 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
enum kgd_engine_type {
KGD_ENGINE_PFP = 1,
KGD_ENGINE_ME,
KGD_ENGINE_CE,
KGD_ENGINE_MEC1,
KGD_ENGINE_MEC2,
KGD_ENGINE_RLC,
KGD_ENGINE_SDMA,
KGD_ENGINE_MAX
};
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
@ -137,6 +148,8 @@ struct kgd2kfd_calls {
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
* @get_fw_version: Returns FW versions from the header
*
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@ -170,12 +183,14 @@ struct kfd2kgd_calls {
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,


@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
if (WARN_ON(!crtc_state))
return;
