mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 11:00:56 +07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts: drivers/net/xen-netfront.c Minor overlapping changes in xen-netfront.c, mostly to do with some buffer management changes alongside the split of stats into TX and RX. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
3f3558bb51
1
.mailmap
1
.mailmap
@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
|
||||
Greg Kroah-Hartman <greg@kroah.com>
|
||||
Henk Vergonet <Henk.Vergonet@gmail.com>
|
||||
Henrik Kretzschmar <henne@nachtwindheim.de>
|
||||
Henrik Rydberg <rydberg@bitmath.org>
|
||||
Herbert Xu <herbert@gondor.apana.org.au>
|
||||
Jacob Shin <Jacob.Shin@amd.com>
|
||||
James Bottomley <jejb@mulgrave.(none)>
|
||||
|
@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
|
||||
route/max_size - INTEGER
|
||||
Maximum number of routes allowed in the kernel. Increase
|
||||
this when using large numbers of interfaces and/or routes.
|
||||
From linux kernel 3.6 onwards, this is deprecated for ipv4
|
||||
as route cache is no longer used.
|
||||
|
||||
neigh/default/gc_thresh1 - INTEGER
|
||||
Minimum number of entries to keep. Garbage collector will not
|
||||
|
@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
|
||||
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
|
||||
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
|
||||
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
|
||||
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
|
||||
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
|
||||
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
|
||||
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
|
||||
buf += " .sess_get_initiator_sid = NULL,\n"
|
||||
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
|
||||
@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
|
||||
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
|
||||
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
|
||||
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
|
||||
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
|
||||
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
|
||||
buf += " /*\n"
|
||||
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
|
||||
buf += " */\n"
|
||||
@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
|
||||
buf += " /*\n"
|
||||
buf += " * Register the top level struct config_item_type with TCM core\n"
|
||||
buf += " */\n"
|
||||
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
|
||||
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
|
||||
buf += " if (IS_ERR(fabric)) {\n"
|
||||
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
|
||||
buf += " return PTR_ERR(fabric);\n"
|
||||
@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
|
||||
if re.search('get_fabric_name', fo):
|
||||
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
|
||||
buf += "{\n"
|
||||
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
|
||||
buf += " return \"" + fabric_mod_name + "\";\n"
|
||||
buf += "}\n\n"
|
||||
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
|
||||
continue
|
||||
@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
|
||||
buf += "}\n\n"
|
||||
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
|
||||
|
||||
if re.search('stop_session\)\(', fo):
|
||||
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
|
||||
buf += "{\n"
|
||||
buf += " return;\n"
|
||||
buf += "}\n\n"
|
||||
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
|
||||
|
||||
if re.search('fall_back_to_erl0\)\(', fo):
|
||||
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
|
||||
buf += "{\n"
|
||||
buf += " return;\n"
|
||||
buf += "}\n\n"
|
||||
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
|
||||
|
||||
if re.search('sess_logged_in\)\(', fo):
|
||||
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
|
||||
buf += "{\n"
|
||||
buf += " return 0;\n"
|
||||
buf += "}\n\n"
|
||||
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
|
||||
|
||||
if re.search('sess_get_index\)\(', fo):
|
||||
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
|
||||
buf += "{\n"
|
||||
@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
|
||||
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
|
||||
|
||||
if re.search('queue_tm_rsp\)\(', fo):
|
||||
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
|
||||
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
|
||||
buf += "{\n"
|
||||
buf += " return 0;\n"
|
||||
buf += " return;\n"
|
||||
buf += "}\n\n"
|
||||
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
|
||||
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
|
||||
|
||||
if re.search('is_state_remove\)\(', fo):
|
||||
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
|
||||
if re.search('aborted_task\)\(', fo):
|
||||
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
|
||||
buf += "{\n"
|
||||
buf += " return 0;\n"
|
||||
buf += " return;\n"
|
||||
buf += "}\n\n"
|
||||
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
|
||||
|
||||
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
|
||||
|
||||
ret = p.write(buf)
|
||||
if ret:
|
||||
@ -1018,11 +993,11 @@ def main(modname, proto_ident):
|
||||
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
|
||||
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
|
||||
|
||||
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
|
||||
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
|
||||
if input == "yes" or input == "y":
|
||||
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
|
||||
|
||||
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
|
||||
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
|
||||
if input == "yes" or input == "y":
|
||||
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
|
||||
|
||||
|
24
MAINTAINERS
24
MAINTAINERS
@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h
|
||||
F: drivers/char/apm-emulation.c
|
||||
|
||||
APPLE BCM5974 MULTITOUCH DRIVER
|
||||
M: Henrik Rydberg <rydberg@euromail.se>
|
||||
M: Henrik Rydberg <rydberg@bitmath.org>
|
||||
L: linux-input@vger.kernel.org
|
||||
S: Maintained
|
||||
S: Odd fixes
|
||||
F: drivers/input/mouse/bcm5974.c
|
||||
|
||||
APPLE SMC DRIVER
|
||||
M: Henrik Rydberg <rydberg@euromail.se>
|
||||
M: Henrik Rydberg <rydberg@bitmath.org>
|
||||
L: lm-sensors@lm-sensors.org
|
||||
S: Maintained
|
||||
S: Odd fixes
|
||||
F: drivers/hwmon/applesmc.c
|
||||
|
||||
APPLETALK NETWORK LAYER
|
||||
@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c
|
||||
BTRFS FILE SYSTEM
|
||||
M: Chris Mason <clm@fb.com>
|
||||
M: Josef Bacik <jbacik@fb.com>
|
||||
M: David Sterba <dsterba@suse.cz>
|
||||
L: linux-btrfs@vger.kernel.org
|
||||
W: http://btrfs.wiki.kernel.org/
|
||||
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
|
||||
@ -4748,7 +4749,7 @@ S: Supported
|
||||
F: drivers/scsi/ipr.*
|
||||
|
||||
IBM Power Virtual Ethernet Device Driver
|
||||
M: Santiago Leon <santil@linux.vnet.ibm.com>
|
||||
M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/net/ethernet/ibm/ibmveth.*
|
||||
@ -4940,10 +4941,10 @@ F: include/uapi/linux/input.h
|
||||
F: include/linux/input/
|
||||
|
||||
INPUT MULTITOUCH (MT) PROTOCOL
|
||||
M: Henrik Rydberg <rydberg@euromail.se>
|
||||
M: Henrik Rydberg <rydberg@bitmath.org>
|
||||
L: linux-input@vger.kernel.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
|
||||
S: Maintained
|
||||
S: Odd fixes
|
||||
F: Documentation/input/multi-touch-protocol.txt
|
||||
F: drivers/input/input-mt.c
|
||||
K: \b(ABS|SYN)_MT_
|
||||
@ -5279,6 +5280,15 @@ W: www.open-iscsi.org
|
||||
Q: http://patchwork.kernel.org/project/linux-rdma/list/
|
||||
F: drivers/infiniband/ulp/iser/
|
||||
|
||||
ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
|
||||
M: Sagi Grimberg <sagig@mellanox.com>
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
|
||||
L: linux-rdma@vger.kernel.org
|
||||
L: target-devel@vger.kernel.org
|
||||
S: Supported
|
||||
W: http://www.linux-iscsi.org
|
||||
F: drivers/infiniband/ulp/isert
|
||||
|
||||
ISDN SUBSYSTEM
|
||||
M: Karsten Keil <isdn@linux-pingi.de>
|
||||
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
|
||||
|
3
Makefile
3
Makefile
@ -1,7 +1,7 @@
|
||||
VERSION = 3
|
||||
PATCHLEVEL = 19
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc3
|
||||
EXTRAVERSION = -rc4
|
||||
NAME = Diseased Newt
|
||||
|
||||
# *DOCUMENTATION*
|
||||
@ -391,6 +391,7 @@ USERINCLUDE := \
|
||||
# Needed to be compatible with the O= option
|
||||
LINUXINCLUDE := \
|
||||
-I$(srctree)/arch/$(hdr-arch)/include \
|
||||
-Iarch/$(hdr-arch)/include/generated/uapi \
|
||||
-Iarch/$(hdr-arch)/include/generated \
|
||||
$(if $(KBUILD_SRC), -I$(srctree)/include) \
|
||||
-Iinclude \
|
||||
|
@ -159,13 +159,28 @@ &fec1 {
|
||||
pinctrl-0 = <&pinctrl_enet1>;
|
||||
phy-supply = <®_enet_3v3>;
|
||||
phy-mode = "rgmii";
|
||||
phy-handle = <ðphy1>;
|
||||
status = "okay";
|
||||
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ethphy1: ethernet-phy@0 {
|
||||
reg = <0>;
|
||||
};
|
||||
|
||||
ethphy2: ethernet-phy@1 {
|
||||
reg = <1>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
&fec2 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet2>;
|
||||
phy-mode = "rgmii";
|
||||
phy-handle = <ðphy2>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
@ -129,13 +129,28 @@ &esdhc1 {
|
||||
|
||||
&fec0 {
|
||||
phy-mode = "rmii";
|
||||
phy-handle = <ðphy0>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_fec0>;
|
||||
status = "okay";
|
||||
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ethphy0: ethernet-phy@0 {
|
||||
reg = <0>;
|
||||
};
|
||||
|
||||
ethphy1: ethernet-phy@1 {
|
||||
reg = <1>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
&fec1 {
|
||||
phy-mode = "rmii";
|
||||
phy-handle = <ðphy1>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_fec1>;
|
||||
status = "okay";
|
||||
|
@ -413,6 +413,7 @@
|
||||
#define __NR_getrandom (__NR_SYSCALL_BASE+384)
|
||||
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
|
||||
#define __NR_bpf (__NR_SYSCALL_BASE+386)
|
||||
#define __NR_execveat (__NR_SYSCALL_BASE+387)
|
||||
|
||||
/*
|
||||
* The following SWIs are ARM private.
|
||||
|
@ -396,6 +396,7 @@
|
||||
CALL(sys_getrandom)
|
||||
/* 385 */ CALL(sys_memfd_create)
|
||||
CALL(sys_bpf)
|
||||
CALL(sys_execveat)
|
||||
#ifndef syscalls_counted
|
||||
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
|
||||
#define syscalls_counted
|
||||
|
@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
|
||||
{
|
||||
return PERF_SAMPLE_REGS_ABI_32;
|
||||
}
|
||||
|
||||
void perf_get_regs_user(struct perf_regs *regs_user,
|
||||
struct pt_regs *regs,
|
||||
struct pt_regs *regs_user_copy)
|
||||
{
|
||||
regs_user->regs = task_pt_regs(current);
|
||||
regs_user->abi = perf_reg_abi(current);
|
||||
}
|
||||
|
@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
|
||||
static const char units[] = "KMGTPE";
|
||||
u64 prot = val & pg_level[level].mask;
|
||||
|
||||
if (addr < USER_PGTABLES_CEILING)
|
||||
return;
|
||||
|
||||
if (!st->level) {
|
||||
st->level = level;
|
||||
st->current_prot = prot;
|
||||
@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
|
||||
pgd_t *pgd = swapper_pg_dir;
|
||||
struct pg_state st;
|
||||
unsigned long addr;
|
||||
unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
|
||||
unsigned i;
|
||||
|
||||
memset(&st, 0, sizeof(st));
|
||||
st.seq = m;
|
||||
st.marker = address_markers;
|
||||
|
||||
pgd += pgdoff;
|
||||
|
||||
for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
|
||||
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
|
||||
addr = i * PGDIR_SIZE;
|
||||
if (!pgd_none(*pgd)) {
|
||||
walk_pud(&st, pgd, addr);
|
||||
|
@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
|
||||
.start = (unsigned long)_stext,
|
||||
.end = (unsigned long)__init_begin,
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
.mask = ~PMD_SECT_RDONLY,
|
||||
.prot = PMD_SECT_RDONLY,
|
||||
.mask = ~L_PMD_SECT_RDONLY,
|
||||
.prot = L_PMD_SECT_RDONLY,
|
||||
#else
|
||||
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
|
||||
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
|
||||
|
@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
|
||||
static void __init map_lowmem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
|
||||
unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
|
||||
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
|
||||
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
|
||||
|
||||
/* Map all the lowmem memory banks. */
|
||||
for_each_memblock(memory, reg) {
|
||||
|
@ -21,6 +21,7 @@
|
||||
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
|
||||
u64 reg_id_aa64pfr0;
|
||||
u64 reg_id_aa64pfr1;
|
||||
|
||||
u32 reg_id_dfr0;
|
||||
u32 reg_id_isar0;
|
||||
u32 reg_id_isar1;
|
||||
u32 reg_id_isar2;
|
||||
@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
|
||||
u32 reg_id_mmfr3;
|
||||
u32 reg_id_pfr0;
|
||||
u32 reg_id_pfr1;
|
||||
|
||||
u32 reg_mvfr0;
|
||||
u32 reg_mvfr1;
|
||||
u32 reg_mvfr2;
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
|
||||
|
@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
||||
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
|
||||
vcpu->arch.hcr_el2 &= ~HCR_RW;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
|
||||
|
@ -31,6 +31,7 @@
|
||||
|
||||
#include <asm/fpsimd.h>
|
||||
#include <asm/hw_breakpoint.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
@ -123,9 +124,6 @@ struct task_struct;
|
||||
/* Free all resources held by a thread. */
|
||||
extern void release_thread(struct task_struct *);
|
||||
|
||||
/* Prepare to copy thread state - unlazy all lazy status */
|
||||
#define prepare_to_copy(tsk) do { } while (0)
|
||||
|
||||
unsigned long get_wchan(struct task_struct *p);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
|
@ -44,7 +44,7 @@
|
||||
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
|
||||
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
|
||||
|
||||
#define __NR_compat_syscalls 386
|
||||
#define __NR_compat_syscalls 387
|
||||
#endif
|
||||
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
|
@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
|
||||
* If we have AArch32, we care about 32-bit features for compat. These
|
||||
* registers should be RES0 otherwise.
|
||||
*/
|
||||
diff |= CHECK(id_dfr0, boot, cur, cpu);
|
||||
diff |= CHECK(id_isar0, boot, cur, cpu);
|
||||
diff |= CHECK(id_isar1, boot, cur, cpu);
|
||||
diff |= CHECK(id_isar2, boot, cur, cpu);
|
||||
@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
|
||||
diff |= CHECK(id_pfr0, boot, cur, cpu);
|
||||
diff |= CHECK(id_pfr1, boot, cur, cpu);
|
||||
|
||||
diff |= CHECK(mvfr0, boot, cur, cpu);
|
||||
diff |= CHECK(mvfr1, boot, cur, cpu);
|
||||
diff |= CHECK(mvfr2, boot, cur, cpu);
|
||||
|
||||
/*
|
||||
* Mismatched CPU features are a recipe for disaster. Don't even
|
||||
* pretend to support them.
|
||||
@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
|
||||
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
|
||||
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
|
||||
|
||||
info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
|
||||
info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
|
||||
info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
|
||||
info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
|
||||
@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
|
||||
info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
|
||||
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
|
||||
|
||||
info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
|
||||
info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
|
||||
info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
|
||||
|
||||
cpuinfo_detect_icache_policy(info);
|
||||
|
||||
check_local_cpu_errata();
|
||||
|
@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
|
||||
|
||||
/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
|
||||
efi_setup_idmap();
|
||||
early_memunmap(memmap.map, memmap.map_end - memmap.map);
|
||||
}
|
||||
|
||||
static int __init remap_region(efi_memory_desc_t *md, void **new)
|
||||
@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
|
||||
}
|
||||
|
||||
mapsize = memmap.map_end - memmap.map;
|
||||
early_memunmap(memmap.map, mapsize);
|
||||
|
||||
if (efi_runtime_disabled()) {
|
||||
pr_info("EFI runtime services will be disabled.\n");
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/moduleloader.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
|
@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
|
||||
else
|
||||
return PERF_SAMPLE_REGS_ABI_64;
|
||||
}
|
||||
|
||||
void perf_get_regs_user(struct perf_regs *regs_user,
|
||||
struct pt_regs *regs,
|
||||
struct pt_regs *regs_user_copy)
|
||||
{
|
||||
regs_user->regs = task_pt_regs(current);
|
||||
regs_user->abi = perf_reg_abi(current);
|
||||
}
|
||||
|
@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
request_standard_resources();
|
||||
|
||||
efi_idmap_init();
|
||||
early_ioremap_reset();
|
||||
|
||||
unflatten_device_tree();
|
||||
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cpu_ops.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
extern void secondary_holding_pen(void);
|
||||
|
@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
|
||||
* Instead, we invalidate Stage-2 for this IPA, and the
|
||||
* whole of Stage-1. Weep...
|
||||
*/
|
||||
lsr x1, x1, #12
|
||||
tlbi ipas2e1is, x1
|
||||
/*
|
||||
* We have to ensure completion of the invalidation at Stage-2,
|
||||
|
@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
if (!cpu_has_32bit_el1())
|
||||
return -EINVAL;
|
||||
cpu_reset = &default_regs_reset32;
|
||||
vcpu->arch.hcr_el2 &= ~HCR_RW;
|
||||
} else {
|
||||
cpu_reset = &default_regs_reset;
|
||||
}
|
||||
|
@ -7,6 +7,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
|
@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
||||
}
|
||||
|
||||
/* wrapper to silence section mismatch warning */
|
||||
int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
||||
int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
|
||||
{
|
||||
return _acpi_map_lsapic(handle, physid, pcpu);
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_map_lsapic);
|
||||
EXPORT_SYMBOL(acpi_map_cpu);
|
||||
|
||||
int acpi_unmap_lsapic(int cpu)
|
||||
int acpi_unmap_cpu(int cpu)
|
||||
{
|
||||
ia64_cpu_to_sapicid[cpu] = -1;
|
||||
set_cpu_present(cpu, false);
|
||||
@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(acpi_unmap_lsapic);
|
||||
EXPORT_SYMBOL(acpi_unmap_cpu);
|
||||
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|
||||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
|
@ -231,7 +231,7 @@ int hypfs_vm_create_files(struct dentry *root)
|
||||
struct dbfs_d2fc_hdr {
|
||||
u64 len; /* Length of d2fc buffer without header */
|
||||
u16 version; /* Version of header */
|
||||
char tod_ext[16]; /* TOD clock for d2fc */
|
||||
char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
|
||||
u64 count; /* Number of VM guests in d2fc buffer */
|
||||
char reserved[30];
|
||||
} __attribute__ ((packed));
|
||||
|
@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
|
||||
|
||||
static inline notrace unsigned long arch_local_save_flags(void)
|
||||
{
|
||||
return __arch_local_irq_stosm(0x00);
|
||||
return __arch_local_irq_stnsm(0xff);
|
||||
}
|
||||
|
||||
static inline notrace unsigned long arch_local_irq_save(void)
|
||||
|
@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
|
||||
set_clock_comparator(S390_lowcore.clock_comparator);
|
||||
}
|
||||
|
||||
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
|
||||
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
|
||||
#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
|
||||
|
||||
typedef unsigned long long cycles_t;
|
||||
|
||||
static inline void get_tod_clock_ext(char clk[16])
|
||||
static inline void get_tod_clock_ext(char *clk)
|
||||
{
|
||||
typedef struct { char _[sizeof(clk)]; } addrtype;
|
||||
typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
|
||||
|
||||
asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
|
||||
}
|
||||
|
||||
static inline unsigned long long get_tod_clock(void)
|
||||
{
|
||||
unsigned char clk[16];
|
||||
unsigned char clk[STORE_CLOCK_EXT_SIZE];
|
||||
|
||||
get_tod_clock_ext(clk);
|
||||
return *((unsigned long long *)&clk[1]);
|
||||
}
|
||||
|
@ -289,7 +289,8 @@
|
||||
#define __NR_bpf 351
|
||||
#define __NR_s390_pci_mmio_write 352
|
||||
#define __NR_s390_pci_mmio_read 353
|
||||
#define NR_syscalls 354
|
||||
#define __NR_execveat 354
|
||||
#define NR_syscalls 355
|
||||
|
||||
/*
|
||||
* There are some system calls that are not present on 64 bit, some
|
||||
|
@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
|
||||
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
|
||||
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
|
||||
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
|
||||
SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
|
||||
|
@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
|
||||
return false;
|
||||
}
|
||||
|
||||
static int check_per_event(unsigned short cause, unsigned long control,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if (!(regs->psw.mask & PSW_MASK_PER))
|
||||
return 0;
|
||||
/* user space single step */
|
||||
if (control == 0)
|
||||
return 1;
|
||||
/* over indication for storage alteration */
|
||||
if ((control & 0x20200000) && (cause & 0x2000))
|
||||
return 1;
|
||||
if (cause & 0x8000) {
|
||||
/* all branches */
|
||||
if ((control & 0x80800000) == 0x80000000)
|
||||
return 1;
|
||||
/* branch into selected range */
|
||||
if (((control & 0x80800000) == 0x80800000) &&
|
||||
regs->psw.addr >= current->thread.per_user.start &&
|
||||
regs->psw.addr <= current->thread.per_user.end)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
||||
{
|
||||
int fixup = probe_get_fixup_type(auprobe->insn);
|
||||
@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
||||
if (regs->psw.addr - utask->xol_vaddr == ilen)
|
||||
regs->psw.addr = utask->vaddr + ilen;
|
||||
}
|
||||
/* If per tracing was active generate trap */
|
||||
if (regs->psw.mask & PSW_MASK_PER)
|
||||
do_per_trap(regs);
|
||||
if (check_per_event(current->thread.per_event.cause,
|
||||
current->thread.per_user.control, regs)) {
|
||||
/* fix per address */
|
||||
current->thread.per_event.address = utask->vaddr;
|
||||
/* trigger per event */
|
||||
set_pt_regs_flag(regs, PIF_PER_TRAP);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
||||
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
|
||||
regs->int_code = auprobe->saved_int_code;
|
||||
regs->psw.addr = current->utask->vaddr;
|
||||
current->thread.per_event.address = current->utask->vaddr;
|
||||
}
|
||||
|
||||
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
|
||||
@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
|
||||
__rc; \
|
||||
})
|
||||
|
||||
#define emu_store_ril(ptr, input) \
|
||||
#define emu_store_ril(regs, ptr, input) \
|
||||
({ \
|
||||
unsigned int mask = sizeof(*(ptr)) - 1; \
|
||||
__typeof__(ptr) __ptr = (ptr); \
|
||||
int __rc = 0; \
|
||||
\
|
||||
if (!test_facility(34)) \
|
||||
__rc = EMU_ILLEGAL_OP; \
|
||||
else if ((u64 __force)ptr & mask) \
|
||||
else if ((u64 __force)__ptr & mask) \
|
||||
__rc = EMU_SPECIFICATION; \
|
||||
else if (put_user(*(input), ptr)) \
|
||||
else if (put_user(*(input), __ptr)) \
|
||||
__rc = EMU_ADDRESSING; \
|
||||
if (__rc == 0) \
|
||||
sim_stor_event(regs, __ptr, mask + 1); \
|
||||
__rc; \
|
||||
})
|
||||
|
||||
@ -197,6 +229,25 @@ union split_register {
|
||||
s16 s16[4];
|
||||
};
|
||||
|
||||
/*
|
||||
* If user per registers are setup to trace storage alterations and an
|
||||
* emulated store took place on a fitting address a user trap is generated.
|
||||
*/
|
||||
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
|
||||
{
|
||||
if (!(regs->psw.mask & PSW_MASK_PER))
|
||||
return;
|
||||
if (!(current->thread.per_user.control & PER_EVENT_STORE))
|
||||
return;
|
||||
if ((void *)current->thread.per_user.start > (addr + len))
|
||||
return;
|
||||
if ((void *)current->thread.per_user.end < addr)
|
||||
return;
|
||||
current->thread.per_event.address = regs->psw.addr;
|
||||
current->thread.per_event.cause = PER_EVENT_STORE >> 16;
|
||||
set_pt_regs_flag(regs, PIF_PER_TRAP);
|
||||
}
|
||||
|
||||
/*
|
||||
* pc relative instructions are emulated, since parameters may not be
|
||||
* accessible from the xol area due to range limitations.
|
||||
@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
|
||||
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
|
||||
break;
|
||||
case 0x07: /* sthrl */
|
||||
rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
|
||||
rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
|
||||
break;
|
||||
case 0x0b: /* stgrl */
|
||||
rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
|
||||
rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
|
||||
break;
|
||||
case 0x0f: /* strl */
|
||||
rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
|
||||
rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
u64 timer, system;
|
||||
|
||||
WARN_ON_ONCE(!irqs_disabled());
|
||||
|
||||
timer = S390_lowcore.last_update_timer;
|
||||
S390_lowcore.last_update_timer = get_vtimer();
|
||||
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
|
||||
|
@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
|
||||
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned long offset;
|
||||
unsigned long offset, mask;
|
||||
|
||||
offset = (unsigned long) entry / sizeof(unsigned long);
|
||||
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
|
||||
page = pmd_to_page((pmd_t *) entry);
|
||||
mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
|
||||
page = virt_to_page((void *)((unsigned long) entry & mask));
|
||||
return page->index + offset;
|
||||
}
|
||||
|
||||
|
@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
|
||||
EMIT4_DISP(0x88500000, K);
|
||||
break;
|
||||
case BPF_ALU | BPF_NEG: /* A = -A */
|
||||
/* lnr %r5,%r5 */
|
||||
EMIT2(0x1155);
|
||||
/* lcr %r5,%r5 */
|
||||
EMIT2(0x1355);
|
||||
break;
|
||||
case BPF_JMP | BPF_JA: /* ip += K */
|
||||
offset = addrs[i + K] + jit->start - jit->prg;
|
||||
@ -502,8 +502,8 @@ branch: if (filter->jt == filter->jf) {
|
||||
xbranch: /* Emit compare if the branch targets are different */
|
||||
if (filter->jt != filter->jf) {
|
||||
jit->seen |= SEEN_XREG;
|
||||
/* cr %r5,%r12 */
|
||||
EMIT2(0x195c);
|
||||
/* clr %r5,%r12 */
|
||||
EMIT2(0x155c);
|
||||
}
|
||||
goto branch;
|
||||
case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
|
||||
|
@ -51,6 +51,7 @@ targets += cpustr.h
|
||||
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
|
||||
$(call if_changed,cpustr)
|
||||
endif
|
||||
clean-files += cpustr.h
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
|
||||
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
|
||||
obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
|
||||
obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
|
||||
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
|
||||
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
|
||||
@ -46,6 +45,7 @@ endif
|
||||
ifeq ($(avx2_supported),yes)
|
||||
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
|
||||
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
|
||||
obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
|
||||
endif
|
||||
|
||||
aes-i586-y := aes-i586-asm_32.o aes_glue.o
|
||||
|
@ -208,7 +208,7 @@ ddq_add_8:
|
||||
|
||||
.if (klen == KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 3*16(p_keys), xkeyA
|
||||
vmovdqa 3*16(p_keys), xkey4
|
||||
.endif
|
||||
.else
|
||||
vmovdqa 3*16(p_keys), xkeyA
|
||||
@ -224,7 +224,7 @@ ddq_add_8:
|
||||
add $(16*by), p_in
|
||||
|
||||
.if (klen == KEY_128)
|
||||
vmovdqa 4*16(p_keys), xkey4
|
||||
vmovdqa 4*16(p_keys), xkeyB
|
||||
.else
|
||||
.if (load_keys)
|
||||
vmovdqa 4*16(p_keys), xkey4
|
||||
@ -234,7 +234,12 @@ ddq_add_8:
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
|
||||
/* key 3 */
|
||||
.if (klen == KEY_128)
|
||||
vaesenc xkey4, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenc xkeyA, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
@ -243,13 +248,18 @@ ddq_add_8:
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkey4, var_xdata, var_xdata /* key 4 */
|
||||
/* key 4 */
|
||||
.if (klen == KEY_128)
|
||||
vaesenc xkeyB, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenc xkey4, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 6*16(p_keys), xkeyB
|
||||
vmovdqa 6*16(p_keys), xkey8
|
||||
.endif
|
||||
.else
|
||||
vmovdqa 6*16(p_keys), xkeyB
|
||||
@ -267,12 +277,17 @@ ddq_add_8:
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
|
||||
/* key 6 */
|
||||
.if (klen == KEY_128)
|
||||
vaesenc xkey8, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenc xkeyB, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_128)
|
||||
vmovdqa 8*16(p_keys), xkey8
|
||||
vmovdqa 8*16(p_keys), xkeyB
|
||||
.else
|
||||
.if (load_keys)
|
||||
vmovdqa 8*16(p_keys), xkey8
|
||||
@ -288,7 +303,7 @@ ddq_add_8:
|
||||
|
||||
.if (klen == KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 9*16(p_keys), xkeyA
|
||||
vmovdqa 9*16(p_keys), xkey12
|
||||
.endif
|
||||
.else
|
||||
vmovdqa 9*16(p_keys), xkeyA
|
||||
@ -297,7 +312,12 @@ ddq_add_8:
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkey8, var_xdata, var_xdata /* key 8 */
|
||||
/* key 8 */
|
||||
.if (klen == KEY_128)
|
||||
vaesenc xkeyB, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenc xkey8, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
@ -306,7 +326,12 @@ ddq_add_8:
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
|
||||
/* key 9 */
|
||||
.if (klen == KEY_128)
|
||||
vaesenc xkey12, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenc xkeyA, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
@ -412,7 +437,6 @@ ddq_add_8:
|
||||
/* main body of aes ctr load */
|
||||
|
||||
.macro do_aes_ctrmain key_len
|
||||
|
||||
cmp $16, num_bytes
|
||||
jb .Ldo_return2\key_len
|
||||
|
||||
|
@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
|
||||
|
||||
/*
|
||||
* Load per CPU data from GDT. LSL is faster than RDTSCP and
|
||||
* works on all CPUs.
|
||||
* works on all CPUs. This is volatile so that it orders
|
||||
* correctly wrt barrier() and to keep gcc from cleverly
|
||||
* hoisting it out of the calling function.
|
||||
*/
|
||||
asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
|
||||
asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
|
||||
|
||||
return p;
|
||||
}
|
||||
|
@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
||||
}
|
||||
|
||||
/* wrapper to silence section mismatch warning */
|
||||
int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
||||
int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
|
||||
{
|
||||
return _acpi_map_lsapic(handle, physid, pcpu);
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_map_lsapic);
|
||||
EXPORT_SYMBOL(acpi_map_cpu);
|
||||
|
||||
int acpi_unmap_lsapic(int cpu)
|
||||
int acpi_unmap_cpu(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
|
||||
@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(acpi_unmap_lsapic);
|
||||
EXPORT_SYMBOL(acpi_unmap_cpu);
|
||||
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|
||||
|
||||
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
|
||||
|
@ -66,3 +66,4 @@ targets += capflags.c
|
||||
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
|
||||
$(call if_changed,mkcapflags)
|
||||
endif
|
||||
clean-files += capflags.c
|
||||
|
@ -28,7 +28,7 @@ function dump_array()
|
||||
# If the /* comment */ starts with a quote string, grab that.
|
||||
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
|
||||
[ -z "$VALUE" ] && VALUE="\"$NAME\""
|
||||
[ "$VALUE" == '""' ] && continue
|
||||
[ "$VALUE" = '""' ] && continue
|
||||
|
||||
# Name is uppercase, VALUE is all lowercase
|
||||
VALUE="$(echo "$VALUE" | tr A-Z a-z)"
|
||||
|
@ -17,7 +17,7 @@
|
||||
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
|
||||
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
|
||||
#define UNCORE_EXTRA_PCI_DEV 0xff
|
||||
#define UNCORE_EXTRA_PCI_DEV_MAX 2
|
||||
#define UNCORE_EXTRA_PCI_DEV_MAX 3
|
||||
|
||||
/* support up to 8 sockets */
|
||||
#define UNCORE_SOCKET_MAX 8
|
||||
|
@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
|
||||
enum {
|
||||
SNBEP_PCI_QPI_PORT0_FILTER,
|
||||
SNBEP_PCI_QPI_PORT1_FILTER,
|
||||
HSWEP_PCI_PCU_3,
|
||||
};
|
||||
|
||||
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
|
||||
@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
|
||||
{
|
||||
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
|
||||
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
|
||||
|
||||
/* Detect 6-8 core systems with only two SBOXes */
|
||||
if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
|
||||
u32 capid4;
|
||||
|
||||
pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
|
||||
0x94, &capid4);
|
||||
if (((capid4 >> 6) & 0x3) == 0)
|
||||
hswep_uncore_sbox.num_boxes = 2;
|
||||
}
|
||||
|
||||
uncore_msr_uncores = hswep_msr_uncores;
|
||||
}
|
||||
|
||||
@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
|
||||
SNBEP_PCI_QPI_PORT1_FILTER),
|
||||
},
|
||||
{ /* PCU.3 (for Capability registers) */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
|
||||
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
|
||||
HSWEP_PCI_PCU_3),
|
||||
},
|
||||
{ /* end: all zeroes */ }
|
||||
};
|
||||
|
||||
|
@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
|
||||
{
|
||||
return PERF_SAMPLE_REGS_ABI_32;
|
||||
}
|
||||
|
||||
void perf_get_regs_user(struct perf_regs *regs_user,
|
||||
struct pt_regs *regs,
|
||||
struct pt_regs *regs_user_copy)
|
||||
{
|
||||
regs_user->regs = task_pt_regs(current);
|
||||
regs_user->abi = perf_reg_abi(current);
|
||||
}
|
||||
#else /* CONFIG_X86_64 */
|
||||
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
|
||||
(1ULL << PERF_REG_X86_ES) | \
|
||||
@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
|
||||
else
|
||||
return PERF_SAMPLE_REGS_ABI_64;
|
||||
}
|
||||
|
||||
void perf_get_regs_user(struct perf_regs *regs_user,
|
||||
struct pt_regs *regs,
|
||||
struct pt_regs *regs_user_copy)
|
||||
{
|
||||
struct pt_regs *user_regs = task_pt_regs(current);
|
||||
|
||||
/*
|
||||
* If we're in an NMI that interrupted task_pt_regs setup, then
|
||||
* we can't sample user regs at all. This check isn't really
|
||||
* sufficient, though, as we could be in an NMI inside an interrupt
|
||||
* that happened during task_pt_regs setup.
|
||||
*/
|
||||
if (regs->sp > (unsigned long)&user_regs->r11 &&
|
||||
regs->sp <= (unsigned long)(user_regs + 1)) {
|
||||
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
|
||||
regs_user->regs = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* RIP, flags, and the argument registers are usually saved.
|
||||
* orig_ax is probably okay, too.
|
||||
*/
|
||||
regs_user_copy->ip = user_regs->ip;
|
||||
regs_user_copy->cx = user_regs->cx;
|
||||
regs_user_copy->dx = user_regs->dx;
|
||||
regs_user_copy->si = user_regs->si;
|
||||
regs_user_copy->di = user_regs->di;
|
||||
regs_user_copy->r8 = user_regs->r8;
|
||||
regs_user_copy->r9 = user_regs->r9;
|
||||
regs_user_copy->r10 = user_regs->r10;
|
||||
regs_user_copy->r11 = user_regs->r11;
|
||||
regs_user_copy->orig_ax = user_regs->orig_ax;
|
||||
regs_user_copy->flags = user_regs->flags;
|
||||
|
||||
/*
|
||||
* Don't even try to report the "rest" regs.
|
||||
*/
|
||||
regs_user_copy->bx = -1;
|
||||
regs_user_copy->bp = -1;
|
||||
regs_user_copy->r12 = -1;
|
||||
regs_user_copy->r13 = -1;
|
||||
regs_user_copy->r14 = -1;
|
||||
regs_user_copy->r15 = -1;
|
||||
|
||||
/*
|
||||
* For this to be at all useful, we need a reasonable guess for
|
||||
* sp and the ABI. Be careful: we're in NMI context, and we're
|
||||
* considering current to be the current task, so we should
|
||||
* be careful not to look at any other percpu variables that might
|
||||
* change during context switches.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
|
||||
task_thread_info(current)->status & TS_COMPAT) {
|
||||
/* Easy case: we're in a compat syscall. */
|
||||
regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
|
||||
regs_user_copy->sp = user_regs->sp;
|
||||
regs_user_copy->cs = user_regs->cs;
|
||||
regs_user_copy->ss = user_regs->ss;
|
||||
} else if (user_regs->orig_ax != -1) {
|
||||
/*
|
||||
* We're probably in a 64-bit syscall.
|
||||
* Warning: this code is severely racy. At least it's better
|
||||
* than just blindly copying user_regs.
|
||||
*/
|
||||
regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
|
||||
regs_user_copy->sp = this_cpu_read(old_rsp);
|
||||
regs_user_copy->cs = __USER_CS;
|
||||
regs_user_copy->ss = __USER_DS;
|
||||
regs_user_copy->cx = -1; /* usually contains garbage */
|
||||
} else {
|
||||
/* We're probably in an interrupt or exception. */
|
||||
regs_user->abi = user_64bit_mode(user_regs) ?
|
||||
PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
|
||||
regs_user_copy->sp = user_regs->sp;
|
||||
regs_user_copy->cs = user_regs->cs;
|
||||
regs_user_copy->ss = user_regs->ss;
|
||||
}
|
||||
|
||||
regs_user->regs = regs_user_copy;
|
||||
}
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
@ -28,7 +28,7 @@
|
||||
|
||||
/* Verify next sizeof(t) bytes can be on the same instruction */
|
||||
#define validate_next(t, insn, n) \
|
||||
((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
|
||||
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
|
||||
|
||||
#define __get_next(t, insn) \
|
||||
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
|
||||
|
@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
|
||||
static unsigned long __init get_new_step_size(unsigned long step_size)
|
||||
{
|
||||
/*
|
||||
* Explain why we shift by 5 and why we don't have to worry about
|
||||
* 'step_size << 5' overflowing:
|
||||
*
|
||||
* initial mapped size is PMD_SIZE (2M).
|
||||
* Initial mapped size is PMD_SIZE (2M).
|
||||
* We can not set step_size to be PUD_SIZE (1G) yet.
|
||||
* In worse case, when we cross the 1G boundary, and
|
||||
* PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
|
||||
* to map 1G range with PTE. Use 5 as shift for now.
|
||||
* to map 1G range with PTE. Hence we use one less than the
|
||||
* difference of page table level shifts.
|
||||
*
|
||||
* Don't need to worry about overflow, on 32bit, when step_size
|
||||
* is 0, round_down() returns 0 for start, and that turns it
|
||||
* into 0x100000000ULL.
|
||||
* Don't need to worry about overflow in the top-down case, on 32bit,
|
||||
* when step_size is 0, round_down() returns 0 for start, and that
|
||||
* turns it into 0x100000000ULL.
|
||||
* In the bottom-up case, round_up(x, 0) returns 0 though too, which
|
||||
* needs to be taken into consideration by the code below.
|
||||
*/
|
||||
return step_size << 5;
|
||||
return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
|
||||
unsigned long step_size;
|
||||
unsigned long addr;
|
||||
unsigned long mapped_ram_size = 0;
|
||||
unsigned long new_mapped_ram_size;
|
||||
|
||||
/* xen has big range in reserved near end of ram, skip it at first.*/
|
||||
addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
|
||||
@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
|
||||
start = map_start;
|
||||
} else
|
||||
start = map_start;
|
||||
new_mapped_ram_size = init_range_memory_mapping(start,
|
||||
mapped_ram_size += init_range_memory_mapping(start,
|
||||
last_start);
|
||||
last_start = start;
|
||||
min_pfn_mapped = last_start >> PAGE_SHIFT;
|
||||
/* only increase step_size after big range get mapped */
|
||||
if (new_mapped_ram_size > mapped_ram_size)
|
||||
if (mapped_ram_size >= step_size)
|
||||
step_size = get_new_step_size(step_size);
|
||||
mapped_ram_size += new_mapped_ram_size;
|
||||
}
|
||||
|
||||
if (real_end < map_end)
|
||||
@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
|
||||
static void __init memory_map_bottom_up(unsigned long map_start,
|
||||
unsigned long map_end)
|
||||
{
|
||||
unsigned long next, new_mapped_ram_size, start;
|
||||
unsigned long next, start;
|
||||
unsigned long mapped_ram_size = 0;
|
||||
/* step_size need to be small so pgt_buf from BRK could cover it */
|
||||
unsigned long step_size = PMD_SIZE;
|
||||
@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
|
||||
* for page table.
|
||||
*/
|
||||
while (start < map_end) {
|
||||
if (map_end - start > step_size) {
|
||||
if (step_size && map_end - start > step_size) {
|
||||
next = round_up(start + 1, step_size);
|
||||
if (next > map_end)
|
||||
next = map_end;
|
||||
} else
|
||||
} else {
|
||||
next = map_end;
|
||||
}
|
||||
|
||||
new_mapped_ram_size = init_range_memory_mapping(start, next);
|
||||
mapped_ram_size += init_range_memory_mapping(start, next);
|
||||
start = next;
|
||||
|
||||
if (new_mapped_ram_size > mapped_ram_size)
|
||||
if (mapped_ram_size >= step_size)
|
||||
step_size = get_new_step_size(step_size);
|
||||
mapped_ram_size += new_mapped_ram_size;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
|
||||
|
||||
struct linux_binprm;
|
||||
|
||||
/* Put the vdso above the (randomized) stack with another randomized offset.
|
||||
This way there is no hole in the middle of address space.
|
||||
To save memory make sure it is still in the same PTE as the stack top.
|
||||
This doesn't give that many random bits.
|
||||
|
||||
Only used for the 64-bit and x32 vdsos. */
|
||||
/*
|
||||
* Put the vdso above the (randomized) stack with another randomized
|
||||
* offset. This way there is no hole in the middle of address space.
|
||||
* To save memory make sure it is still in the same PTE as the stack
|
||||
* top. This doesn't give that many random bits.
|
||||
*
|
||||
* Note that this algorithm is imperfect: the distribution of the vdso
|
||||
* start address within a PMD is biased toward the end.
|
||||
*
|
||||
* Only used for the 64-bit and x32 vdsos.
|
||||
*/
|
||||
static unsigned long vdso_addr(unsigned long start, unsigned len)
|
||||
{
|
||||
#ifdef CONFIG_X86_32
|
||||
@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
|
||||
#else
|
||||
unsigned long addr, end;
|
||||
unsigned offset;
|
||||
end = (start + PMD_SIZE - 1) & PMD_MASK;
|
||||
|
||||
/*
|
||||
* Round up the start address. It can start out unaligned as a result
|
||||
* of stack start randomization.
|
||||
*/
|
||||
start = PAGE_ALIGN(start);
|
||||
|
||||
/* Round the lowest possible end address up to a PMD boundary. */
|
||||
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
|
||||
if (end >= TASK_SIZE_MAX)
|
||||
end = TASK_SIZE_MAX;
|
||||
end -= len;
|
||||
/* This loses some more bits than a modulo, but is cheaper */
|
||||
offset = get_random_int() & (PTRS_PER_PTE - 1);
|
||||
addr = start + (offset << PAGE_SHIFT);
|
||||
if (addr >= end)
|
||||
addr = end;
|
||||
|
||||
if (end > start) {
|
||||
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
|
||||
addr = start + (offset << PAGE_SHIFT);
|
||||
} else {
|
||||
addr = start;
|
||||
}
|
||||
|
||||
/*
|
||||
* page-align it here so that get_unmapped_area doesn't
|
||||
* align it wrongfully again to the next page. addr can come in 4K
|
||||
* unaligned here as a result of stack start randomization.
|
||||
* Forcibly align the final address in case we have a hardware
|
||||
* issue that requires alignment for performance reasons.
|
||||
*/
|
||||
addr = PAGE_ALIGN(addr);
|
||||
addr = align_vdso_addr(addr);
|
||||
|
||||
return addr;
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include <xen/interface/physdev.h>
|
||||
#include <xen/interface/vcpu.h>
|
||||
#include <xen/interface/memory.h>
|
||||
#include <xen/interface/nmi.h>
|
||||
#include <xen/interface/xen-mca.h>
|
||||
#include <xen/features.h>
|
||||
#include <xen/page.h>
|
||||
@ -66,6 +67,7 @@
|
||||
#include <asm/reboot.h>
|
||||
#include <asm/stackprotector.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/mach_traps.h>
|
||||
#include <asm/mwait.h>
|
||||
#include <asm/pci_x86.h>
|
||||
#include <asm/pat.h>
|
||||
@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
|
||||
.emergency_restart = xen_emergency_restart,
|
||||
};
|
||||
|
||||
static unsigned char xen_get_nmi_reason(void)
|
||||
{
|
||||
unsigned char reason = 0;
|
||||
|
||||
/* Construct a value which looks like it came from port 0x61. */
|
||||
if (test_bit(_XEN_NMIREASON_io_error,
|
||||
&HYPERVISOR_shared_info->arch.nmi_reason))
|
||||
reason |= NMI_REASON_IOCHK;
|
||||
if (test_bit(_XEN_NMIREASON_pci_serr,
|
||||
&HYPERVISOR_shared_info->arch.nmi_reason))
|
||||
reason |= NMI_REASON_SERR;
|
||||
|
||||
return reason;
|
||||
}
|
||||
|
||||
static void __init xen_boot_params_init_edd(void)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_EDD)
|
||||
@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
pv_info = xen_info;
|
||||
pv_init_ops = xen_init_ops;
|
||||
pv_apic_ops = xen_apic_ops;
|
||||
if (!xen_pvh_domain())
|
||||
if (!xen_pvh_domain()) {
|
||||
pv_cpu_ops = xen_cpu_ops;
|
||||
|
||||
x86_platform.get_nmi_reason = xen_get_nmi_reason;
|
||||
}
|
||||
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
|
||||
else
|
||||
|
@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
|
||||
return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
|
||||
}
|
||||
|
||||
/* Only to be called in case of a race for a page just allocated! */
|
||||
static void free_p2m_page(void *p)
|
||||
static void __ref free_p2m_page(void *p)
|
||||
{
|
||||
BUG_ON(!slab_is_available());
|
||||
if (unlikely(!slab_is_available())) {
|
||||
free_bootmem((unsigned long)p, PAGE_SIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
free_page((unsigned long)p);
|
||||
}
|
||||
|
||||
@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
|
||||
p2m_missing_pte : p2m_identity_pte;
|
||||
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
|
||||
pmdp = populate_extra_pmd(
|
||||
(unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
|
||||
(unsigned long)(p2m + pfn) + i * PMD_SIZE);
|
||||
set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
|
||||
}
|
||||
}
|
||||
@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
|
||||
* a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
|
||||
* pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
|
||||
*/
|
||||
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
|
||||
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
|
||||
{
|
||||
pte_t *ptechk;
|
||||
pte_t *pteret = ptep;
|
||||
pte_t *pte_newpg[PMDS_PER_MID_PAGE];
|
||||
pmd_t *pmdp;
|
||||
unsigned int level;
|
||||
@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
|
||||
if (ptechk == pte_pg) {
|
||||
set_pmd(pmdp,
|
||||
__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
|
||||
if (vaddr == (addr & ~(PMD_SIZE - 1)))
|
||||
pteret = pte_offset_kernel(pmdp, addr);
|
||||
pte_newpg[i] = NULL;
|
||||
}
|
||||
|
||||
@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
|
||||
vaddr += PMD_SIZE;
|
||||
}
|
||||
|
||||
return pteret;
|
||||
return lookup_address(addr, &level);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
|
||||
|
||||
if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
|
||||
/* PMD level is missing, allocate a new one */
|
||||
ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
|
||||
ptep = alloc_p2m_pmd(addr, pte_pg);
|
||||
if (!ptep)
|
||||
return false;
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
|
||||
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
|
||||
{
|
||||
int i;
|
||||
unsigned long addr = PFN_PHYS(pfn);
|
||||
phys_addr_t addr = PFN_PHYS(pfn);
|
||||
|
||||
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
|
||||
if (addr >= xen_extra_mem[i].start &&
|
||||
@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
|
||||
if (!xen_extra_mem[i].size)
|
||||
continue;
|
||||
pfn_s = PFN_DOWN(xen_extra_mem[i].start);
|
||||
pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
|
||||
for (pfn = pfn_s; pfn < pfn_e; pfn++)
|
||||
@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
|
||||
* as a fallback if the remapping fails.
|
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
unsigned long *released)
unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
unsigned long len = 0;
unsigned long pfn, end;
int ret;

WARN_ON(start_pfn > end_pfn);

/* Release pages first. */
end = min(end_pfn, nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

if (ret == 1) {
(*released)++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break;
len++;
} else
break;
}

/* Need to release pages first */
*released += len;
*identity += set_phys_range_identity(start_pfn, end_pfn);
set_phys_range_identity(start_pfn, end_pfn);
}

/*
@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
}

/* Update kernel mapping, but not for highmem. */
if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
if (pfn >= PFN_UP(__pa(high_memory - 1)))
return;

if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
unsigned long ident_pfn_iter, remap_pfn_iter;
unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
unsigned long ident_cnt = 0;
unsigned int i, chunk;

WARN_ON(size == 0);
@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
xen_remap_mfn = mfn;

/* Set identity map */
ident_cnt += set_phys_range_identity(ident_pfn_iter,
ident_pfn_iter + chunk);
set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

left -= chunk;
}
@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
unsigned long *identity, unsigned long *released)
unsigned long *released, unsigned long *remapped)
{
unsigned long pfn;
unsigned long i = 0;
@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) {
/* Identity map remaining pages */
*identity += set_phys_range_identity(cur_pfn,
cur_pfn + size);
set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
if (cur_pfn + size > nr_pages)
@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (!remap_range_size) {
pr_warning("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
cur_pfn + left, nr_pages, identity, released);
cur_pfn + left, nr_pages, released);
break;
}
/* Adjust size to fit in current e820 RAM region */
@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
*identity += size;
*remapped += size;
}

/*
@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(

static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
unsigned long *released)
unsigned long *released, unsigned long *remapped)
{
phys_addr_t start = 0;
unsigned long identity = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
unsigned long num_remapped = 0;
int i;

/*
@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
&identity, &num_released);
&num_released, &num_remapped);
start = end;
}
}

*released = num_released;
*remapped = num_remapped;

pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
pr_info("Released %ld page(s)\n", num_released);
}

@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
unsigned long remapped_pages;
int i;
int op;

@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
* underlying RAM.
*/
xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
&xen_released_pages);
&xen_released_pages, &remapped_pages);

extra_pages += xen_released_pages;
extra_pages += remapped_pages;

/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO

@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =

struct xen_clock_event_device {
struct clock_event_device evt;
char *name;
char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
if (evt->irq >= 0) {
unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
kfree(per_cpu(xen_clock_events, cpu).name);
per_cpu(xen_clock_events, cpu).name = NULL;
}
}

void xen_setup_timer(int cpu)
{
char *name;
struct clock_event_device *evt;
struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
struct clock_event_device *evt = &xevt->evt;
int irq;

evt = &per_cpu(xen_clock_events, cpu).evt;
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
if (evt->irq >= 0)
xen_teardown_timer(cpu);

printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

name = kasprintf(GFP_KERNEL, "timer%d", cpu);
if (!name)
name = "<timer kasprintf failed>";
snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
name, NULL);
xevt->name, NULL);
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

memcpy(evt, xen_clockevent, sizeof(*evt));

evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
per_cpu(xen_clock_events, cpu).name = name;
}


void xen_setup_cpu_clockevents(void)
{
BUG_ON(preemptible());

clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);

if (q->mq_ops)
blk_mq_wake_waiters(q);
else {
struct request_list *rl;

blk_queue_for_each_rl(rl, q) {
if (rl->rq_pool) {
wake_up(&rl->wait[BLK_RW_SYNC]);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)

/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
blk_set_queue_dying(q);
spin_lock_irq(lock);

/*

@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
}

/*
* Wakeup all potentially sleeping on normal (non-reserved) tags
* Wakeup all potentially sleeping on tags
*/
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)

wake_index = bt_index_inc(wake_index);
}

if (include_reserve) {
bt = &tags->breserved_tags;
if (waitqueue_active(&bt->bs[0].wait))
wake_up(&bt->bs[0].wait);
}
}

/*
@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)

atomic_dec(&tags->active_queues);

blk_mq_tag_wakeup_all(tags);
blk_mq_tag_wakeup_all(tags, false);
}

/*
@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
* static and should never need resizing.
*/
bt_update_count(&tags->bitmap_tags, tdepth);
blk_mq_tag_wakeup_all(tags);
blk_mq_tag_wakeup_all(tags, false);
return 0;
}

@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);

enum {
BLK_MQ_TAG_CACHE_MIN = 1,

@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
|
||||
wake_up_all(&q->mq_freeze_wq);
|
||||
}
|
||||
|
||||
static void blk_mq_freeze_queue_start(struct request_queue *q)
|
||||
void blk_mq_freeze_queue_start(struct request_queue *q)
|
||||
{
|
||||
bool freeze;
|
||||
|
||||
@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
|
||||
blk_mq_run_queues(q, false);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
|
||||
|
||||
static void blk_mq_freeze_queue_wait(struct request_queue *q)
|
||||
{
|
||||
@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
|
||||
blk_mq_freeze_queue_wait(q);
|
||||
}
|
||||
|
||||
static void blk_mq_unfreeze_queue(struct request_queue *q)
|
||||
void blk_mq_unfreeze_queue(struct request_queue *q)
|
||||
{
|
||||
bool wake;
|
||||
|
||||
@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
|
||||
wake_up_all(&q->mq_freeze_wq);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
|
||||
|
||||
void blk_mq_wake_waiters(struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
unsigned int i;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
if (blk_mq_hw_queue_mapped(hctx))
|
||||
blk_mq_tag_wakeup_all(hctx->tags, true);
|
||||
|
||||
/*
|
||||
* If we are called because the queue has now been marked as
|
||||
* dying, we need to ensure that processes currently waiting on
|
||||
* the queue are notified as well.
|
||||
*/
|
||||
wake_up_all(&q->mq_freeze_wq);
|
||||
}
|
||||
|
||||
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
|
||||
ctx = alloc_data.ctx;
|
||||
}
|
||||
blk_mq_put_ctx(ctx);
|
||||
if (!rq)
|
||||
if (!rq) {
|
||||
blk_mq_queue_exit(q);
|
||||
return ERR_PTR(-EWOULDBLOCK);
|
||||
}
|
||||
return rq;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_alloc_request);
|
||||
@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_complete_request);
|
||||
|
||||
int blk_mq_request_started(struct request *rq)
|
||||
{
|
||||
return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_mq_request_started);
|
||||
|
||||
void blk_mq_start_request(struct request *rq)
|
||||
{
|
||||
struct request_queue *q = rq->q;
|
||||
@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
|
||||
|
||||
void blk_mq_cancel_requeue_work(struct request_queue *q)
|
||||
{
|
||||
cancel_work_sync(&q->requeue_work);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
|
||||
|
||||
void blk_mq_kick_requeue_list(struct request_queue *q)
|
||||
{
|
||||
kblockd_schedule_work(&q->requeue_work);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
|
||||
|
||||
void blk_mq_abort_requeue_list(struct request_queue *q)
|
||||
{
|
||||
unsigned long flags;
|
||||
LIST_HEAD(rq_list);
|
||||
|
||||
spin_lock_irqsave(&q->requeue_lock, flags);
|
||||
list_splice_init(&q->requeue_list, &rq_list);
|
||||
spin_unlock_irqrestore(&q->requeue_lock, flags);
|
||||
|
||||
while (!list_empty(&rq_list)) {
|
||||
struct request *rq;
|
||||
|
||||
rq = list_first_entry(&rq_list, struct request, queuelist);
|
||||
list_del_init(&rq->queuelist);
|
||||
rq->errors = -EIO;
|
||||
blk_mq_end_request(rq, rq->errors);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_abort_requeue_list);
|
||||
|
||||
static inline bool is_flush_request(struct request *rq,
|
||||
struct blk_flush_queue *fq, unsigned int tag)
|
||||
{
|
||||
@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
|
||||
struct request *rq, void *priv, bool reserved)
|
||||
{
|
||||
struct blk_mq_timeout_data *data = priv;
|
||||
|
||||
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
|
||||
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
|
||||
/*
|
||||
* If a request wasn't started before the queue was
|
||||
* marked dying, kill it here or it'll go unnoticed.
|
||||
*/
|
||||
if (unlikely(blk_queue_dying(rq->q))) {
|
||||
rq->errors = -EIO;
|
||||
blk_mq_complete_request(rq);
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (rq->cmd_flags & REQ_NO_TIMEOUT)
|
||||
return;
|
||||
|
||||
if (time_after_eq(jiffies, rq->deadline)) {
|
||||
@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||
hctx->queue = q;
|
||||
hctx->queue_num = hctx_idx;
|
||||
hctx->flags = set->flags;
|
||||
hctx->cmd_size = set->cmd_size;
|
||||
|
||||
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
|
||||
blk_mq_hctx_notify, hctx);
|
||||
|
@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
* CPU hotplug helpers

@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
struct request_queue *q = req->q;
unsigned long expiry;

if (req->cmd_flags & REQ_NO_TIMEOUT)
return;

/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
if (!q->mq_ops && !q->rq_timed_out_fn)
return;

@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
obj-y += tty/
obj-y += char/

# gpu/ comes after char for AGP vs DRM startup
# iommu/ comes before gpu as gpu are using iommu controllers
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/

# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/

obj-$(CONFIG_CONNECTOR) += connector/
@ -141,7 +144,6 @@ obj-y += clk/

obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/

@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
|
||||
acpi_status status;
|
||||
int ret;
|
||||
|
||||
if (pr->apic_id == -1)
|
||||
if (pr->phys_id == -1)
|
||||
return -ENODEV;
|
||||
|
||||
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
|
||||
@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
|
||||
cpu_maps_update_begin();
|
||||
cpu_hotplug_begin();
|
||||
|
||||
ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
|
||||
ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = arch_register_cpu(pr->id);
|
||||
if (ret) {
|
||||
acpi_unmap_lsapic(pr->id);
|
||||
acpi_unmap_cpu(pr->id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
|
||||
union acpi_object object = { 0 };
|
||||
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
|
||||
struct acpi_processor *pr = acpi_driver_data(device);
|
||||
int apic_id, cpu_index, device_declaration = 0;
|
||||
int phys_id, cpu_index, device_declaration = 0;
|
||||
acpi_status status = AE_OK;
|
||||
static int cpu0_initialized;
|
||||
unsigned long long value;
|
||||
@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
|
||||
pr->acpi_id = value;
|
||||
}
|
||||
|
||||
apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
|
||||
if (apic_id < 0)
|
||||
acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
|
||||
pr->apic_id = apic_id;
|
||||
phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
|
||||
if (phys_id < 0)
|
||||
acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
|
||||
pr->phys_id = phys_id;
|
||||
|
||||
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
|
||||
cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
|
||||
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
|
||||
cpu0_initialized = 1;
|
||||
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
|
||||
/*
|
||||
* Handle UP system running SMP kernel, with no CPU
|
||||
* entry in MADT
|
||||
*/
|
||||
if ((cpu_index == -1) && (num_online_cpus() == 1))
|
||||
cpu_index = 0;
|
||||
}
|
||||
@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
|
||||
|
||||
/* Remove the CPU. */
|
||||
arch_unregister_cpu(pr->id);
|
||||
acpi_unmap_lsapic(pr->id);
|
||||
acpi_unmap_cpu(pr->id);
|
||||
|
||||
cpu_hotplug_done();
|
||||
cpu_maps_update_done();
|
||||
|
@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)

device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device))
return 0;
return -ENXIO;

result = acpi_device_get_power(device, &state);
if (result)

@ -14,10 +14,10 @@

#include "internal.h"

#define DO_ENUMERATION 0x01
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT3400", DO_ENUMERATION },
{"INT3401"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
{"INT3403"},
{"INT3404"},
@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
if (id->driver_data == DO_ENUMERATION)
acpi_create_platform_device(adev);
#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
if (id->driver_data == INT3401_DEVICE)
acpi_create_platform_device(adev);
#endif
return 1;

@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
|
||||
unsigned long madt_end, entry;
|
||||
static struct acpi_table_madt *madt;
|
||||
static int read_madt;
|
||||
int apic_id = -1;
|
||||
int phys_id = -1; /* CPU hardware ID */
|
||||
|
||||
if (!read_madt) {
|
||||
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
|
||||
@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
|
||||
}
|
||||
|
||||
if (!madt)
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
|
||||
entry = (unsigned long)madt;
|
||||
madt_end = entry + madt->header.length;
|
||||
@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
|
||||
struct acpi_subtable_header *header =
|
||||
(struct acpi_subtable_header *)entry;
|
||||
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
|
||||
if (!map_lapic_id(header, acpi_id, &apic_id))
|
||||
if (!map_lapic_id(header, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
|
||||
if (!map_x2apic_id(header, type, acpi_id, &apic_id))
|
||||
if (!map_x2apic_id(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
|
||||
if (!map_lsapic_id(header, type, acpi_id, &apic_id))
|
||||
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
}
|
||||
entry += header->length;
|
||||
}
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
||||
@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *obj;
|
||||
struct acpi_subtable_header *header;
|
||||
int apic_id = -1;
|
||||
int phys_id = -1;
|
||||
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
|
||||
goto exit;
|
||||
@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
||||
|
||||
header = (struct acpi_subtable_header *)obj->buffer.pointer;
|
||||
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
|
||||
map_lapic_id(header, acpi_id, &apic_id);
|
||||
map_lapic_id(header, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
|
||||
map_lsapic_id(header, type, acpi_id, &apic_id);
|
||||
map_lsapic_id(header, type, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
|
||||
map_x2apic_id(header, type, acpi_id, &apic_id);
|
||||
map_x2apic_id(header, type, acpi_id, &phys_id);
|
||||
|
||||
exit:
|
||||
kfree(buffer.pointer);
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
|
||||
int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
int apic_id;
|
||||
int phys_id;
|
||||
|
||||
apic_id = map_mat_entry(handle, type, acpi_id);
|
||||
if (apic_id == -1)
|
||||
apic_id = map_madt_entry(type, acpi_id);
|
||||
phys_id = map_mat_entry(handle, type, acpi_id);
|
||||
if (phys_id == -1)
|
||||
phys_id = map_madt_entry(type, acpi_id);
|
||||
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
int acpi_map_cpuid(int apic_id, u32 acpi_id)
|
||||
int acpi_map_cpuid(int phys_id, u32 acpi_id)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
int i;
|
||||
#endif
|
||||
|
||||
if (apic_id == -1) {
|
||||
if (phys_id == -1) {
|
||||
/*
|
||||
* On UP processor, there is no _MAT or MADT table.
|
||||
* So above apic_id is always set to -1.
|
||||
* So above phys_id is always set to -1.
|
||||
*
|
||||
* BIOS may define multiple CPU handles even for UP processor.
|
||||
* For example,
|
||||
@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
|
||||
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
|
||||
* }
|
||||
*
|
||||
* Ignores apic_id and always returns 0 for the processor
|
||||
* Ignores phys_id and always returns 0 for the processor
|
||||
* handle with acpi id 0 if nr_cpu_ids is 1.
|
||||
* This should be the case if SMP tables are not found.
|
||||
* Return -1 for other CPU's handle.
|
||||
@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
|
||||
if (nr_cpu_ids <= 1 && acpi_id == 0)
|
||||
return acpi_id;
|
||||
else
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
for_each_possible_cpu(i) {
|
||||
if (cpu_physical_id(i) == apic_id)
|
||||
if (cpu_physical_id(i) == phys_id)
|
||||
return i;
|
||||
}
|
||||
#else
|
||||
/* In UP kernel, only processor 0 is valid */
|
||||
if (apic_id == 0)
|
||||
return apic_id;
|
||||
if (phys_id == 0)
|
||||
return phys_id;
|
||||
#endif
|
||||
return -1;
|
||||
}
|
||||
|
||||
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
int apic_id;
|
||||
int phys_id;
|
||||
|
||||
apic_id = acpi_get_apicid(handle, type, acpi_id);
|
||||
phys_id = acpi_get_phys_id(handle, type, acpi_id);
|
||||
|
||||
return acpi_map_cpuid(apic_id, acpi_id);
|
||||
return acpi_map_cpuid(phys_id, acpi_id);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
|
||||
|
@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
|
||||
if (device->wakeup.flags.valid)
|
||||
acpi_power_resources_list_free(&device->wakeup.resources);
|
||||
|
||||
if (!device->flags.power_manageable)
|
||||
if (!device->power.flags.power_resources)
|
||||
return;
|
||||
|
||||
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
|
||||
@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
|
||||
device->power.flags.power_resources)
|
||||
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
|
||||
|
||||
if (acpi_bus_init_power(device)) {
|
||||
acpi_free_power_resources_lists(device);
|
||||
if (acpi_bus_init_power(device))
|
||||
device->flags.power_manageable = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void acpi_bus_get_flags(struct acpi_device *device)
|
||||
@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
|
||||
/* Skip devices that are not present. */
|
||||
if (!acpi_device_is_present(device)) {
|
||||
device->flags.visited = false;
|
||||
device->flags.power_manageable = 0;
|
||||
return;
|
||||
}
|
||||
if (device->handler)
|
||||
goto ok;
|
||||
|
||||
if (!device->flags.initialized) {
|
||||
acpi_bus_update_power(device, NULL);
|
||||
device->flags.power_manageable =
|
||||
device->power.states[ACPI_STATE_D0].flags.valid;
|
||||
if (acpi_bus_init_power(device))
|
||||
device->flags.power_manageable = 0;
|
||||
|
||||
device->flags.initialized = true;
|
||||
}
|
||||
device->flags.visited = false;
|
||||
|
@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
},
},

{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
.callback = video_disable_native_backlight,
.ident = "Dell XPS15 L521X",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
},
},
{}
};

@ -530,7 +530,7 @@ static int null_add_dev(void)
goto out_cleanup_queues;

nullb->q = blk_mq_init_queue(&nullb->tag_set);
if (!nullb->q) {
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}

@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
|
||||
cmd->fn = handler;
|
||||
cmd->ctx = ctx;
|
||||
cmd->aborted = 0;
|
||||
blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
|
||||
}
|
||||
|
||||
/* Special values must be less than 0x1000 */
|
||||
@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
|
||||
if (unlikely(status)) {
|
||||
if (!(status & NVME_SC_DNR || blk_noretry_request(req))
|
||||
&& (jiffies - req->start_time) < req->timeout) {
|
||||
unsigned long flags;
|
||||
|
||||
blk_mq_requeue_request(req);
|
||||
blk_mq_kick_requeue_list(req->q);
|
||||
spin_lock_irqsave(req->q->queue_lock, flags);
|
||||
if (!blk_queue_stopped(req->q))
|
||||
blk_mq_kick_requeue_list(req->q);
|
||||
spin_unlock_irqrestore(req->q->queue_lock, flags);
|
||||
return;
|
||||
}
|
||||
req->errors = nvme_error_status(status);
|
||||
@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
}
|
||||
}
|
||||
|
||||
blk_mq_start_request(req);
|
||||
|
||||
nvme_set_info(cmd, iod, req_completion);
|
||||
spin_lock_irq(&nvmeq->q_lock);
|
||||
if (req->cmd_flags & REQ_DISCARD)
|
||||
@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
req->cmd_flags |= REQ_NO_TIMEOUT;
|
||||
cmd_info = blk_mq_rq_to_pdu(req);
|
||||
nvme_set_info(cmd_info, req, async_req_completion);
|
||||
|
||||
@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
|
||||
struct nvme_command cmd;
|
||||
|
||||
if (!nvmeq->qid || cmd_rq->aborted) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev_list_lock, flags);
|
||||
if (work_busy(&dev->reset_work))
|
||||
return;
|
||||
goto out;
|
||||
list_del_init(&dev->node);
|
||||
dev_warn(&dev->pci_dev->dev,
|
||||
"I/O %d QID %d timeout, reset controller\n",
|
||||
req->tag, nvmeq->qid);
|
||||
dev->reset_workfn = nvme_reset_failed_dev;
|
||||
queue_work(nvme_workq, &dev->reset_work);
|
||||
out:
|
||||
spin_unlock_irqrestore(&dev_list_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
|
||||
void *ctx;
|
||||
nvme_completion_fn fn;
|
||||
struct nvme_cmd_info *cmd;
|
||||
static struct nvme_completion cqe = {
|
||||
.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
|
||||
};
|
||||
struct nvme_completion cqe;
|
||||
|
||||
if (!blk_mq_request_started(req))
|
||||
return;
|
||||
|
||||
cmd = blk_mq_rq_to_pdu(req);
|
||||
|
||||
if (cmd->ctx == CMD_CTX_CANCELLED)
|
||||
return;
|
||||
|
||||
if (blk_queue_dying(req->q))
|
||||
cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
|
||||
else
|
||||
cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
|
||||
|
||||
|
||||
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
|
||||
req->tag, nvmeq->qid);
|
||||
ctx = cancel_cmd_info(cmd, &fn);
|
||||
@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
|
||||
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
|
||||
struct nvme_queue *nvmeq = cmd->nvmeq;
|
||||
|
||||
dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
|
||||
nvmeq->qid);
|
||||
if (nvmeq->dev->initialized)
|
||||
nvme_abort_req(req);
|
||||
|
||||
/*
|
||||
* The aborted req will be completed on receiving the abort req.
|
||||
* We enable the timer again. If hit twice, it'll cause a device reset,
|
||||
* as the device then is in a faulty state.
|
||||
*/
|
||||
return BLK_EH_RESET_TIMER;
|
||||
int ret = BLK_EH_RESET_TIMER;
|
||||
|
||||
dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
|
||||
nvmeq->qid);
|
||||
|
||||
spin_lock_irq(&nvmeq->q_lock);
|
||||
if (!nvmeq->dev->initialized) {
|
||||
/*
|
||||
* Force cancelled command frees the request, which requires we
|
||||
* return BLK_EH_NOT_HANDLED.
|
||||
*/
|
||||
nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
|
||||
ret = BLK_EH_NOT_HANDLED;
|
||||
} else
|
||||
nvme_abort_req(req);
|
||||
spin_unlock_irq(&nvmeq->q_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nvme_free_queue(struct nvme_queue *nvmeq)
|
||||
@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
|
||||
*/
|
||||
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
|
||||
{
|
||||
int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
|
||||
int vector;
|
||||
|
||||
spin_lock_irq(&nvmeq->q_lock);
|
||||
if (nvmeq->cq_vector == -1) {
|
||||
spin_unlock_irq(&nvmeq->q_lock);
|
||||
return 1;
|
||||
}
|
||||
vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
|
||||
nvmeq->dev->online_queues--;
|
||||
nvmeq->cq_vector = -1;
|
||||
spin_unlock_irq(&nvmeq->q_lock);
|
||||
|
||||
irq_set_affinity_hint(vector, NULL);
|
||||
@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
|
||||
adapter_delete_sq(dev, qid);
|
||||
adapter_delete_cq(dev, qid);
|
||||
}
|
||||
if (!qid && dev->admin_q)
|
||||
blk_mq_freeze_queue_start(dev->admin_q);
|
||||
nvme_clear_queue(nvmeq);
|
||||
}
|
||||
|
||||
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
|
||||
int depth, int vector)
|
||||
int depth)
|
||||
{
|
||||
struct device *dmadev = &dev->pci_dev->dev;
|
||||
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
|
||||
@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
|
||||
nvmeq->cq_phase = 1;
|
||||
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
|
||||
nvmeq->q_depth = depth;
|
||||
nvmeq->cq_vector = vector;
|
||||
nvmeq->qid = qid;
|
||||
dev->queue_count++;
|
||||
dev->queues[qid] = nvmeq;
|
||||
@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
|
||||
struct nvme_dev *dev = nvmeq->dev;
|
||||
int result;
|
||||
|
||||
nvmeq->cq_vector = qid - 1;
|
||||
result = adapter_alloc_cq(dev, qid, nvmeq);
|
||||
if (result < 0)
|
||||
return result;
|
||||
@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
|
||||
.timeout = nvme_timeout,
|
||||
};
|
||||
|
||||
static void nvme_dev_remove_admin(struct nvme_dev *dev)
|
||||
{
|
||||
if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
|
||||
blk_cleanup_queue(dev->admin_q);
|
||||
blk_mq_free_tag_set(&dev->admin_tagset);
|
||||
}
|
||||
}
|
||||
|
||||
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
|
||||
{
|
||||
if (!dev->admin_q) {
|
||||
@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
|
||||
if (!dev->admin_q) {
|
||||
if (IS_ERR(dev->admin_q)) {
|
||||
blk_mq_free_tag_set(&dev->admin_tagset);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
if (!blk_get_queue(dev->admin_q)) {
|
||||
nvme_dev_remove_admin(dev);
|
||||
return -ENODEV;
|
||||
}
|
||||
} else
|
||||
blk_mq_unfreeze_queue(dev->admin_q);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nvme_free_admin_tags(struct nvme_dev *dev)
|
||||
{
|
||||
if (dev->admin_q)
|
||||
blk_mq_free_tag_set(&dev->admin_tagset);
|
||||
}
|
||||
|
||||
static int nvme_configure_admin_queue(struct nvme_dev *dev)
|
||||
{
|
||||
int result;
|
||||
@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
|
||||
|
||||
nvmeq = dev->queues[0];
|
||||
if (!nvmeq) {
|
||||
nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
|
||||
nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
|
||||
if (!nvmeq)
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
|
||||
if (result)
|
||||
goto free_nvmeq;
|
||||
|
||||
result = nvme_alloc_admin_tags(dev);
|
||||
nvmeq->cq_vector = 0;
|
||||
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
|
||||
if (result)
|
||||
goto free_nvmeq;
|
||||
|
||||
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
|
||||
if (result)
|
||||
goto free_tags;
|
||||
|
||||
return result;
|
||||
|
||||
free_tags:
|
||||
nvme_free_admin_tags(dev);
|
||||
free_nvmeq:
|
||||
nvme_free_queues(dev, 0);
|
||||
return result;
|
||||
@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
|
||||
unsigned i;
|
||||
|
||||
for (i = dev->queue_count; i <= dev->max_qid; i++)
|
||||
if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
|
||||
if (!nvme_alloc_queue(dev, i, dev->q_depth))
|
||||
break;
|
||||
|
||||
for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
|
||||
@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
|
||||
break;
|
||||
if (!schedule_timeout(ADMIN_TIMEOUT) ||
|
||||
fatal_signal_pending(current)) {
|
||||
/*
|
||||
* Disable the controller first since we can't trust it
|
||||
* at this point, but leave the admin queue enabled
|
||||
* until all queue deletion requests are flushed.
|
||||
* FIXME: This may take a while if there are more h/w
|
||||
* queues than admin tags.
|
||||
*/
|
||||
set_current_state(TASK_RUNNING);
|
||||
|
||||
nvme_disable_ctrl(dev, readq(&dev->bar->cap));
|
||||
nvme_disable_queue(dev, 0);
|
||||
|
||||
send_sig(SIGKILL, dq->worker->task, 1);
|
||||
nvme_clear_queue(dev->queues[0]);
|
||||
flush_kthread_worker(dq->worker);
|
||||
nvme_disable_queue(dev, 0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
|
||||
{
|
||||
struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
|
||||
cmdinfo.work);
|
||||
allow_signal(SIGKILL);
|
||||
if (nvme_delete_sq(nvmeq))
|
||||
nvme_del_queue_end(nvmeq);
|
||||
}
|
||||
@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
|
||||
kthread_stop(tmp);
|
||||
}
|
||||
|
||||
static void nvme_freeze_queues(struct nvme_dev *dev)
|
||||
{
|
||||
struct nvme_ns *ns;
|
||||
|
||||
list_for_each_entry(ns, &dev->namespaces, list) {
|
||||
blk_mq_freeze_queue_start(ns->queue);
|
||||
|
||||
spin_lock(ns->queue->queue_lock);
|
||||
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
|
||||
spin_unlock(ns->queue->queue_lock);
|
||||
|
||||
blk_mq_cancel_requeue_work(ns->queue);
|
||||
blk_mq_stop_hw_queues(ns->queue);
|
||||
}
|
||||
}
|
||||
|
||||
static void nvme_unfreeze_queues(struct nvme_dev *dev)
|
||||
{
|
||||
struct nvme_ns *ns;
|
||||
|
||||
list_for_each_entry(ns, &dev->namespaces, list) {
|
||||
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
|
||||
blk_mq_unfreeze_queue(ns->queue);
|
||||
blk_mq_start_stopped_hw_queues(ns->queue, true);
|
||||
blk_mq_kick_requeue_list(ns->queue);
|
||||
}
|
||||
}
|
||||
|
||||
static void nvme_dev_shutdown(struct nvme_dev *dev)
|
||||
{
|
||||
int i;
|
||||
@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
|
||||
dev->initialized = 0;
|
||||
nvme_dev_list_remove(dev);
|
||||
|
||||
if (dev->bar)
|
||||
if (dev->bar) {
|
||||
nvme_freeze_queues(dev);
|
||||
csts = readl(&dev->bar->csts);
|
||||
}
|
||||
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
|
||||
for (i = dev->queue_count - 1; i >= 0; i--) {
|
||||
struct nvme_queue *nvmeq = dev->queues[i];
|
||||
@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
|
||||
nvme_dev_unmap(dev);
|
||||
}
|
||||
|
||||
static void nvme_dev_remove_admin(struct nvme_dev *dev)
|
||||
{
|
||||
if (dev->admin_q && !blk_queue_dying(dev->admin_q))
|
||||
blk_cleanup_queue(dev->admin_q);
|
||||
}
|
||||
|
||||
static void nvme_dev_remove(struct nvme_dev *dev)
|
||||
{
|
||||
struct nvme_ns *ns;
|
||||
@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
|
||||
list_for_each_entry(ns, &dev->namespaces, list) {
|
||||
if (ns->disk->flags & GENHD_FL_UP)
|
||||
del_gendisk(ns->disk);
|
||||
if (!blk_queue_dying(ns->queue))
|
||||
if (!blk_queue_dying(ns->queue)) {
|
||||
blk_mq_abort_requeue_list(ns->queue);
|
||||
blk_cleanup_queue(ns->queue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
|
||||
nvme_free_namespaces(dev);
|
||||
nvme_release_instance(dev);
|
||||
blk_mq_free_tag_set(&dev->tagset);
|
||||
blk_put_queue(dev->admin_q);
|
||||
kfree(dev->queues);
|
||||
kfree(dev->entry);
|
||||
kfree(dev);
|
||||
@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
|
||||
}
|
||||
|
||||
nvme_init_queue(dev->queues[0], 0);
|
||||
result = nvme_alloc_admin_tags(dev);
|
||||
if (result)
|
||||
goto disable;
|
||||
|
||||
result = nvme_setup_io_queues(dev);
|
||||
if (result)
|
||||
goto disable;
|
||||
goto free_tags;
|
||||
|
||||
nvme_set_irq_hints(dev);
|
||||
|
||||
return result;
|
||||
|
||||
free_tags:
|
||||
nvme_dev_remove_admin(dev);
|
||||
disable:
|
||||
nvme_disable_queue(dev, 0);
|
||||
nvme_dev_list_remove(dev);
|
||||
@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
|
||||
dev->reset_workfn = nvme_remove_disks;
|
||||
queue_work(nvme_workq, &dev->reset_work);
|
||||
spin_unlock(&dev_list_lock);
|
||||
} else {
|
||||
nvme_unfreeze_queues(dev);
|
||||
nvme_set_irq_hints(dev);
|
||||
}
|
||||
dev->initialized = 1;
|
||||
return 0;
|
||||
@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
flush_work(&dev->reset_work);
|
||||
misc_deregister(&dev->miscdev);
|
||||
nvme_dev_remove(dev);
|
||||
nvme_dev_shutdown(dev);
|
||||
nvme_dev_remove(dev);
|
||||
nvme_dev_remove_admin(dev);
|
||||
nvme_free_queues(dev, 0);
|
||||
nvme_free_admin_tags(dev);
|
||||
nvme_release_prp_pools(dev);
|
||||
kref_put(&dev->kref, nvme_free_dev);
|
||||
}
|
||||
|
@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;

q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
if (!q) {
if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}

@ -47,13 +47,6 @@
|
||||
|
||||
#define DLN2_GPIO_MAX_PINS 32
|
||||
|
||||
struct dln2_irq_work {
|
||||
struct work_struct work;
|
||||
struct dln2_gpio *dln2;
|
||||
int pin;
|
||||
int type;
|
||||
};
|
||||
|
||||
struct dln2_gpio {
|
||||
struct platform_device *pdev;
|
||||
struct gpio_chip gpio;
|
||||
@ -64,10 +57,12 @@ struct dln2_gpio {
|
||||
*/
|
||||
DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
|
||||
|
||||
DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
|
||||
DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
|
||||
DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
|
||||
struct dln2_irq_work *irq_work;
|
||||
/* active IRQs - not synced to hardware */
|
||||
DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
|
||||
/* active IRQS - synced to hardware */
|
||||
DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
|
||||
int irq_type[DLN2_GPIO_MAX_PINS];
|
||||
struct mutex irq_lock;
|
||||
};
|
||||
|
||||
struct dln2_gpio_pin {
|
||||
@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
|
||||
return !!ret;
|
||||
}
|
||||
|
||||
static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
|
||||
unsigned int pin, int value)
|
||||
static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
|
||||
unsigned int pin, int value)
|
||||
{
|
||||
struct dln2_gpio_pin_val req = {
|
||||
.pin = cpu_to_le16(pin),
|
||||
.value = value,
|
||||
};
|
||||
|
||||
dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
|
||||
sizeof(req));
|
||||
return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
|
||||
sizeof(req));
|
||||
}
|
||||
|
||||
#define DLN2_GPIO_DIRECTION_IN 0
|
||||
@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
|
||||
static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
|
||||
int value)
|
||||
{
|
||||
struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
|
||||
int ret;
|
||||
|
||||
ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
|
||||
}
|
||||
|
||||
@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
|
||||
&req, sizeof(req));
|
||||
}
|
||||
|
||||
static void dln2_irq_work(struct work_struct *w)
|
||||
{
|
||||
struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
|
||||
struct dln2_gpio *dln2 = iw->dln2;
|
||||
u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
|
||||
|
||||
if (test_bit(iw->pin, dln2->irqs_enabled))
|
||||
dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
|
||||
else
|
||||
dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
|
||||
}
|
||||
|
||||
static void dln2_irq_enable(struct irq_data *irqd)
|
||||
static void dln2_irq_unmask(struct irq_data *irqd)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
|
||||
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
|
||||
set_bit(pin, dln2->irqs_enabled);
|
||||
schedule_work(&dln2->irq_work[pin].work);
|
||||
}
|
||||
|
||||
static void dln2_irq_disable(struct irq_data *irqd)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
|
||||
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
|
||||
clear_bit(pin, dln2->irqs_enabled);
|
||||
schedule_work(&dln2->irq_work[pin].work);
|
||||
set_bit(pin, dln2->unmasked_irqs);
|
||||
}
|
||||
|
||||
static void dln2_irq_mask(struct irq_data *irqd)
|
||||
@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
|
||||
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
|
||||
set_bit(pin, dln2->irqs_masked);
|
||||
}
|
||||
|
||||
static void dln2_irq_unmask(struct irq_data *irqd)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
|
||||
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
|
||||
struct device *dev = dln2->gpio.dev;
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
|
||||
if (test_and_clear_bit(pin, dln2->irqs_pending)) {
|
||||
int irq;
|
||||
|
||||
irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
|
||||
if (!irq) {
|
||||
dev_err(dev, "pin %d not mapped to IRQ\n", pin);
|
||||
return;
|
||||
}
|
||||
|
||||
generic_handle_irq(irq);
|
||||
}
|
||||
clear_bit(pin, dln2->unmasked_irqs);
|
||||
}
|
||||
|
||||
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
|
||||
@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
|
||||
|
||||
switch (type) {
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
|
||||
dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
|
||||
dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_BOTH:
|
||||
dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
|
||||
dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
|
||||
dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
|
||||
dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dln2_irq_bus_lock(struct irq_data *irqd)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
|
||||
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
|
||||
|
||||
mutex_lock(&dln2->irq_lock);
|
||||
}
|
||||
|
||||
static void dln2_irq_bus_unlock(struct irq_data *irqd)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
|
||||
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
int enabled, unmasked;
|
||||
unsigned type;
|
||||
int ret;
|
||||
|
||||
enabled = test_bit(pin, dln2->enabled_irqs);
|
||||
unmasked = test_bit(pin, dln2->unmasked_irqs);
|
||||
|
||||
if (enabled != unmasked) {
|
||||
if (unmasked) {
|
||||
type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
|
||||
set_bit(pin, dln2->enabled_irqs);
|
||||
} else {
|
||||
type = DLN2_GPIO_EVENT_NONE;
|
||||
clear_bit(pin, dln2->enabled_irqs);
|
||||
}
|
||||
|
||||
ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
|
||||
if (ret)
|
||||
dev_err(dln2->gpio.dev, "failed to set event\n");
|
||||
}
|
||||
|
||||
mutex_unlock(&dln2->irq_lock);
|
||||
}
|
||||
|
||||
static struct irq_chip dln2_gpio_irqchip = {
|
||||
.name = "dln2-irq",
|
||||
.irq_enable = dln2_irq_enable,
|
||||
.irq_disable = dln2_irq_disable,
|
||||
.irq_mask = dln2_irq_mask,
|
||||
.irq_unmask = dln2_irq_unmask,
|
||||
.irq_set_type = dln2_irq_set_type,
|
||||
.irq_bus_lock = dln2_irq_bus_lock,
|
||||
.irq_bus_sync_unlock = dln2_irq_bus_unlock,
|
||||
};
|
||||
|
||||
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
|
||||
@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
|
||||
return;
|
||||
}
|
||||
|
||||
if (!test_bit(pin, dln2->irqs_enabled))
|
||||
return;
|
||||
if (test_bit(pin, dln2->irqs_masked)) {
|
||||
set_bit(pin, dln2->irqs_pending);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (dln2->irq_work[pin].type) {
|
||||
switch (dln2->irq_type[pin]) {
|
||||
case DLN2_GPIO_EVENT_CHANGE_RISING:
|
||||
if (event->value)
|
||||
generic_handle_irq(irq);
|
||||
@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
|
||||
struct dln2_gpio *dln2;
|
||||
struct device *dev = &pdev->dev;
|
||||
int pins;
|
||||
int i, ret;
|
||||
int ret;
|
||||
|
||||
pins = dln2_gpio_get_pin_count(pdev);
|
||||
if (pins < 0) {
|
||||
@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
|
||||
if (!dln2)
|
||||
return -ENOMEM;
|
||||
|
||||
dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
|
||||
sizeof(struct dln2_irq_work), GFP_KERNEL);
|
||||
if (!dln2->irq_work)
|
||||
return -ENOMEM;
|
||||
for (i = 0; i < pins; i++) {
|
||||
INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
|
||||
dln2->irq_work[i].pin = i;
|
||||
dln2->irq_work[i].dln2 = dln2;
|
||||
}
|
||||
mutex_init(&dln2->irq_lock);
|
||||
|
||||
dln2->pdev = pdev;
|
||||
|
||||
@ -529,11 +510,8 @@ static int dln2_gpio_probe(struct platform_device *pdev)
|
||||
static int dln2_gpio_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
|
||||
int i;
|
||||
|
||||
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
|
||||
for (i = 0; i < dln2->gpio.ngpio; i++)
|
||||
flush_work(&dln2->irq_work[i].work);
|
||||
gpiochip_remove(&dln2->gpio);
|
||||
|
||||
return 0;
|
||||
|
@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add(gc);
if (err) {
dev_err(&ofdev->dev, "Could not add gpiochip\n");
irq_domain_remove(priv->domain);
if (priv->domain)
irq_domain_remove(priv->domain);
return err;
}

@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/

@ -31,7 +31,6 @@
|
||||
#include <uapi/linux/kfd_ioctl.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <uapi/asm-generic/mman-common.h>
|
||||
#include <asm/processor.h>
|
||||
#include "kfd_priv.h"
|
||||
@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
|
||||
void __user *arg)
|
||||
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
|
||||
void *data)
|
||||
{
|
||||
struct kfd_ioctl_get_version_args args;
|
||||
struct kfd_ioctl_get_version_args *args = data;
|
||||
int err = 0;
|
||||
|
||||
args.major_version = KFD_IOCTL_MAJOR_VERSION;
|
||||
args.minor_version = KFD_IOCTL_MINOR_VERSION;
|
||||
|
||||
if (copy_to_user(arg, &args, sizeof(args)))
|
||||
err = -EFAULT;
|
||||
args->major_version = KFD_IOCTL_MAJOR_VERSION;
|
||||
args->minor_version = KFD_IOCTL_MINOR_VERSION;
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -221,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
|
||||
void __user *arg)
|
||||
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
|
||||
void *data)
|
||||
{
|
||||
struct kfd_ioctl_create_queue_args args;
|
||||
struct kfd_ioctl_create_queue_args *args = data;
|
||||
struct kfd_dev *dev;
|
||||
int err = 0;
|
||||
unsigned int queue_id;
|
||||
@ -233,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
|
||||
|
||||
memset(&q_properties, 0, sizeof(struct queue_properties));
|
||||
|
||||
if (copy_from_user(&args, arg, sizeof(args)))
|
||||
return -EFAULT;
|
||||
|
||||
pr_debug("kfd: creating queue ioctl\n");
|
||||
|
||||
err = set_queue_properties_from_user(&q_properties, &args);
|
||||
err = set_queue_properties_from_user(&q_properties, args);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
dev = kfd_device_by_id(args.gpu_id);
|
||||
dev = kfd_device_by_id(args->gpu_id);
|
||||
if (dev == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
@ -250,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
|
||||
|
||||
pdd = kfd_bind_process_to_device(dev, p);
|
||||
if (IS_ERR(pdd)) {
|
||||
err = PTR_ERR(pdd);
|
||||
err = -ESRCH;
|
||||
goto err_bind_process;
|
||||
}
|
||||
|
||||
@ -263,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
|
||||
if (err != 0)
|
||||
goto err_create_queue;
|
||||
|
||||
args.queue_id = queue_id;
|
||||
args->queue_id = queue_id;
|
||||
|
||||
/* Return gpu_id as doorbell offset for mmap usage */
|
||||
args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
|
||||
|
||||
if (copy_to_user(arg, &args, sizeof(args))) {
|
||||
err = -EFAULT;
|
||||
goto err_copy_args_out;
|
||||
}
|
||||
args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
|
||||
|
||||
mutex_unlock(&p->mutex);
|
||||
|
||||
pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
|
||||
pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
|
||||
|
||||
pr_debug("ring buffer address == 0x%016llX\n",
|
||||
args.ring_base_address);
|
||||
args->ring_base_address);
|
||||
|
||||
pr_debug("read ptr address == 0x%016llX\n",
|
||||
args.read_pointer_address);
|
||||
args->read_pointer_address);
|
||||
|
||||
pr_debug("write ptr address == 0x%016llX\n",
|
||||
args.write_pointer_address);
|
||||
args->write_pointer_address);
|
||||
|
||||
return 0;
|
||||
|
||||
err_copy_args_out:
|
||||
pqm_destroy_queue(&p->pqm, queue_id);
|
||||
err_create_queue:
|
||||
err_bind_process:
|
||||
mutex_unlock(&p->mutex);
|
||||
@ -297,99 +283,90 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
|
||||
}
|
||||
|
||||
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
|
||||
void __user *arg)
|
||||
void *data)
|
||||
{
|
||||
int retval;
|
||||
struct kfd_ioctl_destroy_queue_args args;
|
||||
|
||||
if (copy_from_user(&args, arg, sizeof(args)))
|
||||
return -EFAULT;
|
||||
struct kfd_ioctl_destroy_queue_args *args = data;
|
||||
|
||||
pr_debug("kfd: destroying queue id %d for PASID %d\n",
|
||||
args.queue_id,
|
||||
args->queue_id,
|
||||
p->pasid);
|
||||
|
||||
mutex_lock(&p->mutex);
|
||||
|
||||
retval = pqm_destroy_queue(&p->pqm, args.queue_id);
|
||||
retval = pqm_destroy_queue(&p->pqm, args->queue_id);
|
||||
|
||||
mutex_unlock(&p->mutex);
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
|
||||
void __user *arg)
|
||||
void *data)
|
||||
{
|
||||
int retval;
|
||||
struct kfd_ioctl_update_queue_args args;
|
||||
struct kfd_ioctl_update_queue_args *args = data;
|
||||
struct queue_properties properties;
|
||||
|
||||
if (copy_from_user(&args, arg, sizeof(args)))
|
||||
return -EFAULT;
|
||||
|
||||
if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
|
||||
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
|
||||
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
|
||||
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
|
||||
pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((args.ring_base_address) &&
|
||||
if ((args->ring_base_address) &&
|
||||
(!access_ok(VERIFY_WRITE,
|
||||
(const void __user *) args.ring_base_address,
|
||||
(const void __user *) args->ring_base_address,
|
||||
sizeof(uint64_t)))) {
|
||||
pr_err("kfd: can't access ring base address\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
|
||||
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
|
||||
pr_err("kfd: ring size must be a power of 2 or 0\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
properties.queue_address = args.ring_base_address;
|
||||
properties.queue_size = args.ring_size;
|
||||
properties.queue_percent = args.queue_percentage;
|
||||
properties.priority = args.queue_priority;
|
||||
properties.queue_address = args->ring_base_address;
|
||||
properties.queue_size = args->ring_size;
|
||||
properties.queue_percent = args->queue_percentage;
|
||||
properties.priority = args->queue_priority;
|
||||
|
||||
pr_debug("kfd: updating queue id %d for PASID %d\n",
|
||||
args.queue_id, p->pasid);
|
||||
args->queue_id, p->pasid);
|
||||
|
||||
mutex_lock(&p->mutex);
|
||||
|
||||
retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
|
||||
retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
|
||||
|
||||
mutex_unlock(&p->mutex);
|
||||
|
||||
return retval;
|
||||
}

static long kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void __user *arg)
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_set_memory_policy_args args;
struct kfd_ioctl_set_memory_policy_args *args = data;
struct kfd_dev *dev;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;

if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;

if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}

if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}

dev = kfd_device_by_id(args.gpu_id);
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;

@ -397,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,

pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
err = -ESRCH;
goto out;
}

default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;

alternate_policy =
(args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;

if (!dev->dqm->set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
(void __user *)args.alternate_aperture_base,
args.alternate_aperture_size))
(void __user *)args->alternate_aperture_base,
args->alternate_aperture_size))
err = -EINVAL;

out:
@ -422,53 +399,44 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
return err;
}

static long kfd_ioctl_get_clock_counters(struct file *filep,
struct kfd_process *p, void __user *arg)
static int kfd_ioctl_get_clock_counters(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_clock_counters_args args;
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
struct timespec time;

if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;

dev = kfd_device_by_id(args.gpu_id);
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;

/* Reading GPU clock counter from KGD */
args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);

/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic(&time);
args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);

get_monotonic_boottime(&time);
args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
args->system_clock_counter = (uint64_t)timespec_to_ns(&time);

/* Since the counter is in nano-seconds we use 1GHz frequency */
args.system_clock_freq = 1000000000;

if (copy_to_user(arg, &args, sizeof(args)))
return -EFAULT;
args->system_clock_freq = 1000000000;

return 0;
}


static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process *p, void __user *arg)
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_process_apertures_args args;
struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;

dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

if (copy_from_user(&args, arg, sizeof(args)))
return -EFAULT;

args.num_of_nodes = 0;
args->num_of_nodes = 0;

mutex_lock(&p->mutex);

@ -477,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
/* Run over all pdd of the process */
pdd = kfd_get_first_process_device_data(p);
do {
pAperture = &args.process_apertures[args.num_of_nodes];
pAperture =
&args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
@ -487,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
pAperture->scratch_limit = pdd->scratch_limit;

dev_dbg(kfd_device,
"node id %u\n", args.num_of_nodes);
"node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
@ -503,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);

args.num_of_nodes++;
args->num_of_nodes++;
} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
(args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}

mutex_unlock(&p->mutex);

if (copy_to_user(arg, &args, sizeof(args)))
return -EFAULT;

return 0;
}

#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
kfd_ioctl_get_version, 0),

AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
kfd_ioctl_create_queue, 0),

AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
kfd_ioctl_destroy_queue, 0),

AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
kfd_ioctl_set_memory_policy, 0),

AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
kfd_ioctl_get_clock_counters, 0),

AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
kfd_ioctl_get_process_apertures, 0),

AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
kfd_ioctl_update_queue, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)

static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
long err = -EINVAL;
amdkfd_ioctl_t *func;
const struct amdkfd_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128];
char *kdata = NULL;
unsigned int usize, asize;
int retcode = -EINVAL;

dev_dbg(kfd_device,
"ioctl cmd 0x%x (#%d), arg 0x%lx\n",
cmd, _IOC_NR(cmd), arg);
if (nr >= AMDKFD_CORE_IOCTL_COUNT)
goto err_i1;

if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;

ioctl = &amdkfd_ioctls[nr];

amdkfd_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
if (amdkfd_size > asize)
asize = amdkfd_size;

cmd = ioctl->cmd;
} else
goto err_i1;

dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

process = kfd_get_process(current);
if (IS_ERR(process))
return PTR_ERR(process);

switch (cmd) {
case KFD_IOC_GET_VERSION:
err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
break;
case KFD_IOC_CREATE_QUEUE:
err = kfd_ioctl_create_queue(filep, process,
(void __user *)arg);
break;

case KFD_IOC_DESTROY_QUEUE:
err = kfd_ioctl_destroy_queue(filep, process,
(void __user *)arg);
break;

case KFD_IOC_SET_MEMORY_POLICY:
err = kfd_ioctl_set_memory_policy(filep, process,
(void __user *)arg);
break;

case KFD_IOC_GET_CLOCK_COUNTERS:
err = kfd_ioctl_get_clock_counters(filep, process,
(void __user *)arg);
break;

case KFD_IOC_GET_PROCESS_APERTURES:
err = kfd_ioctl_get_process_apertures(filep, process,
(void __user *)arg);
break;

case KFD_IOC_UPDATE_QUEUE:
err = kfd_ioctl_update_queue(filep, process,
(void __user *)arg);
break;

default:
dev_err(kfd_device,
"unknown ioctl cmd 0x%x, arg 0x%lx)\n",
cmd, arg);
err = -EINVAL;
break;
if (IS_ERR(process)) {
dev_dbg(kfd_device, "no process\n");
goto err_i1;
}

if (err < 0)
dev_err(kfd_device,
"ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
err, cmd, _IOC_NR(cmd));
/* Do not trust userspace, use our own definition */
func = ioctl->func;

return err;
if (unlikely(!func)) {
dev_dbg(kfd_device, "no function\n");
retcode = -EINVAL;
goto err_i1;
}

if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
if (asize > usize)
memset(kdata + usize, 0, asize - usize);
}

if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
retcode = -EFAULT;
goto err_i1;
}
} else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
}

retcode = func(filep, process, kdata);

if (cmd & IOC_OUT)
if (copy_to_user((void __user *)arg, kdata, usize) != 0)
retcode = -EFAULT;

err_i1:
if (!ioctl)
dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);

if (kdata != stack_kdata)
kfree(kdata);

if (retcode)
dev_dbg(kfd_device, "ret = %d\n", retcode);

return retcode;
}

static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)

@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
{
int bit = qpd->vmid - KFD_VMID_START_OFFSET;

/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
qpd->vmid = 0;
q->properties.vmid = 0;
@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}

pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
q->pipe,
q->queue);

retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
q->queue, q->properties.write_ptr);
if (retval != 0) {
deallocate_hqd(dqm, q);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
return retval;
}

return 0;
}

@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
bool prev_active = false;

BUG_ON(!dqm || !q || !q->mqd);

@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}

retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if (q->properties.is_active == true)
prev_active = true;

/*
*
* check active state vs. the previous state
* and modify counter accordingly
*/
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if ((q->properties.is_active == true) && (prev_active == false))
dqm->queue_count++;
else
else if ((q->properties.is_active == false) && (prev_active == true))
dqm->queue_count--;

if (sched_policy != KFD_SCHED_POLICY_NO_HWS)

@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{

return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);

}

@ -32,7 +32,7 @@ int kfd_pasid_init(void)
{
pasid_limit = max_num_of_processes;

pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;

@ -463,6 +463,24 @@ struct kfd_process {
bool is_32bit_user_mode;
};

/**
* Ioctl function type.
*
* \param filep pointer to file structure.
* \param p amdkfd process pointer.
* \param data pointer to arg that was copied from user.
*/
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
void *data);

struct amdkfd_ioctl_desc {
unsigned int cmd;
int flags;
amdkfd_ioctl_t *func;
unsigned int cmd_drv;
const char *name;
};

void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);

@ -921,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
uint32_t i = 0;

list_for_each_entry(dev, &topology_device_list, list) {
ret = kfd_build_sysfs_node_entry(dev, 0);
ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;

@ -183,7 +183,7 @@ struct kfd2kgd_calls {
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);

bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);

int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,

@ -1756,8 +1756,6 @@ struct drm_i915_private {
*/
struct workqueue_struct *dp_wq;

uint32_t bios_vgacntr;

/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,

@ -1048,6 +1048,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
}

intel_runtime_pm_get(dev_priv);

ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
goto put_rpm;

obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
@ -1121,6 +1124,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
put_rpm:
intel_runtime_pm_put(dev_priv);

return ret;
}

@ -3725,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;

intel_prepare_page_flip(dev, plane);

/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@ -3736,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;

intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;

@ -3907,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;

intel_prepare_page_flip(dev, plane);

/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@ -3918,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;

intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;

@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);

/*
* Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
* from S3 without preserving (some of?) the other bits.
*/
I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}

@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)

intel_shared_dpll_init(dev);

/* save the BIOS value before clobbering it */
dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);

@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
vlv_power_sequencer_reset(dev_priv);
}

static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

if (power_well->always_on || !i915.disable_power_well) {
if (!enabled)
goto mismatch;

return;
}

if (enabled != (power_well->count > 0))
goto mismatch;

return;

mismatch:
WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}

/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}

check_power_well_state(dev_priv, power_well);
}

power_domains->domain_use_count[domain]++;
@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}

check_power_well_state(dev_priv, power_well);
}

mutex_unlock(&power_domains->lock);

@ -26,7 +26,7 @@
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
BUG_ON(!spin_is_locked(&event->refs_lock));
assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
BUG_ON(!spin_is_locked(&event->refs_lock));
assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (++event->refs[index * event->types_nr + type] == 1) {

@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
struct nvkm_event *event = notify->event;
unsigned long flags;

BUG_ON(!spin_is_locked(&event->list_lock));
assert_spin_locked(&event->list_lock);
BUG_ON(size != notify->size);

spin_lock_irqsave(&event->refs_lock, flags);

@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
break;
case 0x106:
device->cname = "GK208B";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
case 0x108:
device->cname = "GK208";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;

@ -44,8 +44,10 @@ static void
pramin_fini(void *data)
{
struct priv *priv = data;
nv_wr32(priv->bios, 0x001700, priv->bar0);
kfree(priv);
if (priv) {
nv_wr32(priv->bios, 0x001700, priv->bar0);
kfree(priv);
}
}

static void *

@ -24,34 +24,71 @@

#include "nv50.h"

struct nvaa_ram_priv {
struct nouveau_ram base;
u64 poller_base;
};

static int
nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
u32 rsvd_head = ( 256 * 1024); /* vga memory */
u32 rsvd_tail = (1024 * 1024); /* vbios etc */
struct nouveau_fb *pfb = nouveau_fb(parent);
struct nouveau_ram *ram;
struct nvaa_ram_priv *priv;
int ret;

ret = nouveau_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
ret = nouveau_ram_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;

ram->size = nv_rd32(pfb, 0x10020c);
ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
priv->base.type = NV_MEM_TYPE_STOLEN;
priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;

ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
(rsvd_head + rsvd_tail), 1);
rsvd_tail += 0x1000;
priv->poller_base = priv->base.size - rsvd_tail;

ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
(priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
1);
if (ret)
return ret;

ram->type = NV_MEM_TYPE_STOLEN;
ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
ram->get = nv50_ram_get;
ram->put = nv50_ram_put;
priv->base.get = nv50_ram_get;
priv->base.put = nv50_ram_put;
return 0;
}

static int
nvaa_ram_init(struct nouveau_object *object)
{
struct nouveau_fb *pfb = nouveau_fb(object);
struct nvaa_ram_priv *priv = (void *)object;
int ret;
u64 dniso, hostnb, flush;

ret = nouveau_ram_init(&priv->base);
if (ret)
return ret;

dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;

/* Enable NISO poller for various clients and set their associated
* read address, only for MCP77/78 and MCP79/7A. (fd#25701)
*/
nv_wr32(pfb, 0x100c18, dniso);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
nv_wr32(pfb, 0x100c1c, hostnb);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
nv_wr32(pfb, 0x100c24, flush);
nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);

return 0;
}

@ -60,7 +97,7 @@ nvaa_ram_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvaa_ram_ctor,
.dtor = _nouveau_ram_dtor,
.init = _nouveau_ram_init,
.init = nvaa_ram_init,
.fini = _nouveau_ram_fini,
},
};

@ -24,13 +24,6 @@

#include "nv04.h"

static void
nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
{
struct nv04_mc_priv *priv = (void *)pmc;
nv_wr08(priv, 0x088050, 0xff);
}

struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
.msi_rearm = nv4c_mc_msi_rearm,
}.base;

@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
* so use the DMA API for them.
*/
if (!nv_device_is_cpu_coherent(device) &&
ttm->caching_state == tt_uncached)
ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
return;
}

#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {

@ -36,7 +36,14 @@ void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
struct device *dev = drm->dev->dev;
int ret;

ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0 && ret != -EACCES))
return;

if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);

pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}

int
@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_vma *vma;
struct device *dev = drm->dev->dev;
int ret;

if (!cli->vm)
@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}

ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
if (ret) {
kfree(vma);
ret = pm_runtime_get_sync(dev);
if (ret < 0 && ret != -EACCES)
goto out;
}

ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
if (ret)
kfree(vma);

pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
} else {
vma->refcount++;
}
@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;

@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)

vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
if (--vma->refcount == 0)
nouveau_gem_object_unmap(nvbo, vma);
if (--vma->refcount == 0) {
ret = pm_runtime_get_sync(dev);
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
}
}
ttm_bo_unreserve(&nvbo->bo);
}

@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return pll;
}
/* otherwise, pick one of the plls */
if ((rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_KABINI) ||
if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
/* KB/KV/ML has PPLL1 and PPLL2 */
/* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
/* CI has PPLL0, PPLL1, and PPLL2 */
/* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
(rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,

@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;

if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
return MODE_CLOCK_HIGH;

if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;

@ -2156,4 +2156,6 @@
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u

#define IH_VMID_0_LUT 0x3D40u

#endif

@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
}

sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
if (sad_count < 0) {
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}

@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (radeon_bapm == -1) {
/* There are stability issues reported on with
* bapm enabled on an asrock system.
*/
if (rdev->pdev->subsystem_vendor == 0x1849)
pi->bapm_enable = false;
else
/* only enable bapm on KB, ML by default */
if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
pi->bapm_enable = true;
else
pi->bapm_enable = false;
} else if (radeon_bapm == 0) {
pi->bapm_enable = false;
} else {

@ -72,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);

static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@ -92,7 +92,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
.init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
.hqd_is_occupies = kgd_hqd_is_occupies,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
.get_fw_version = get_fw_version
};
@ -101,6 +101,7 @@ static const struct kgd2kfd_calls *kgd2kfd;

bool radeon_kfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
const struct kgd2kfd_calls**);

@ -117,6 +118,17 @@ bool radeon_kfd_init(void)
}

return true;
#elif defined(CONFIG_HSA_AMD)
if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
kgd2kfd = NULL;

return false;
}

return true;
#else
return false;
#endif
}

void radeon_kfd_fini(void)
@ -378,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
cpu_relax();
write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

/* Mapping vmid to pasid also for IH block */
write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
pasid_mapping);

return 0;
}

@ -517,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}

static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t act;
@ -556,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
if (timeout == 0) {
pr_err("kfd: cp queue preemption time out (%dms)\n",
temp);
release_queue(kgd);
return -ETIME;
}
msleep(20);

@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
u32 format;
u32 *buffer;
const u8 __user *data;
int size, dwords, tex_width, blit_width, spitch;
unsigned int size, dwords, tex_width, blit_width, spitch;
u32 height;
int i;
u32 texpitch, microtile;
Some files were not shown because too many files have changed in this diff