Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2018-02-24 00:04:20 -05:00
commit f74290fdb3
320 changed files with 2316 additions and 1386 deletions

.gitignore

@@ -127,3 +127,7 @@ all.config
 # Kdevelop4
 *.kdev4
+#Automatically generated by ASN.1 compiler
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h


@@ -3,4 +3,4 @@
 ==================================
 .. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
-:doc: Faraday TV Encoder 200
+:doc: Faraday TV Encoder TVE200 DRM Driver


@@ -28,8 +28,10 @@ Supported adapters:
 * Intel Wildcat Point (PCH)
 * Intel Wildcat Point-LP (PCH)
 * Intel BayTrail (SOC)
+* Intel Braswell (SOC)
 * Intel Sunrise Point-H (PCH)
 * Intel Sunrise Point-LP (PCH)
+* Intel Kaby Lake-H (PCH)
 * Intel DNV (SOC)
 * Intel Broxton (SOC)
 * Intel Lewisburg (PCH)


@@ -7909,7 +7909,6 @@ S: Maintained
 F: scripts/leaking_addresses.pl
 LED SUBSYSTEM
-M: Richard Purdie <rpurdie@rpsys.net>
 M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M: Pavel Machek <pavel@ucw.cz>
 L: linux-leds@vger.kernel.org


@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
 #define BUG() do { \
 pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-dump_stack(); \
+barrier_before_unreachable(); \
+__builtin_trap(); \
 } while (0)
 #define HAVE_ARCH_BUG


@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 #include <asm/outercache.h>
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd)
 prcmu_system_reset(0);
 }
-/*
-* The PMU IRQ lines of two cores are wired together into a single interrupt.
-* Bounce the interrupt to the other core if it's not ours.
-*/
-static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
-{
-irqreturn_t ret = handler(irq, dev);
-int other = !smp_processor_id();
-if (ret == IRQ_NONE && cpu_online(other))
-irq_set_affinity(irq, cpumask_of(other));
-/*
-* We should be able to get away with the amount of IRQ_NONEs we give,
-* while still having the spurious IRQ detection code kick in if the
-* interrupt really starts hitting spuriously.
-*/
-return ret;
-}
-static struct arm_pmu_platdata db8500_pmu_platdata = {
-.handle_irq = db8500_pmu_handler,
-.irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
-};
-static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
-/* Requires call-back bindings. */
-OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
-{},
-};
 static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
 OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
 {},
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void)
 if (of_machine_is_compatible("st-ericsson,u8540"))
 of_platform_populate(NULL, u8500_local_bus_nodes,
 u8540_auxdata_lookup, NULL);
-else
-of_platform_populate(NULL, u8500_local_bus_nodes,
-u8500_auxdata_lookup, NULL);
 }
 static const char * stericsson_dt_platform_compat[] = {


@@ -20,7 +20,7 @@
 #define MPIDR_UP_BITMASK (0x1 << 30)
 #define MPIDR_MT_BITMASK (0x1 << 24)
-#define MPIDR_HWID_BITMASK 0xff00ffffffUL
+#define MPIDR_HWID_BITMASK UL(0xff00ffffff)
 #define MPIDR_LEVEL_BITS_SHIFT 3
 #define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)


@@ -28,7 +28,7 @@ struct stackframe {
 unsigned long fp;
 unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-unsigned int graph;
+int graph;
 #endif
 };


@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
-static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
 {
-unsigned long limit = current_thread_info()->addr_limit;
+unsigned long ret, limit = current_thread_info()->addr_limit;
 __chk_user_ptr(addr);
 asm volatile(
 // A + B <= C + 1 for all A,B,C, in four easy steps:
 // 1: X = A + B; X' = X % 2^64
-" adds %0, %0, %2\n"
+" adds %0, %3, %2\n"
 // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
 " csel %1, xzr, %1, hi\n"
 // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
 // testing X' - C == 0, subject to the previous adjustments.
 " sbcs xzr, %0, %1\n"
 " cset %0, ls\n"
-: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
-return addr;
+return ret;
 }
 /*
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
 */
 #define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
+#define access_ok(type, addr, size) __range_ok(addr, size)
 #define user_addr_max get_fs
 #define _ASM_EXTABLE(from, to) \


@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
 static int swp_handler(struct pt_regs *regs, u32 instr)
 {
 u32 destreg, data, type, address = 0;
+const void __user *user_ptr;
 int rn, rt2, res = 0;
 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
 aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
 /* Check access in reasonable access range for both SWP and SWPB */
-if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+user_ptr = (const void __user *)(unsigned long)(address & ~3);
+if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
 pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
 address);
 goto fault;


@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };
 static const struct arm64_ftr_bits ftr_ctr[] = {
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
-ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
 /*
 * Linux can handle differing I-cache policies. Userspace JITs will


@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info)
 int pmuver;
 dfr0 = read_sysreg(id_aa64dfr0_el1);
-pmuver = cpuid_feature_extract_signed_field(dfr0,
+pmuver = cpuid_feature_extract_unsigned_field(dfr0,
 ID_AA64DFR0_PMUVER_SHIFT);
-if (pmuver < 1)
+if (pmuver == 0xf || pmuver == 0)
 return;
 probe->present = true;


@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs)
 show_regs_print_info(KERN_DEFAULT);
 print_pstate(regs);
-printk("pc : %pS\n", (void *)regs->pc);
-printk("lr : %pS\n", (void *)lr);
+if (!user_mode(regs)) {
+printk("pc : %pS\n", (void *)regs->pc);
+printk("lr : %pS\n", (void *)lr);
+} else {
+printk("pc : %016llx\n", regs->pc);
+printk("lr : %016llx\n", lr);
+}
 printk("sp : %016llx\n", sp);
 i = top_reg;


@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 if (tsk->ret_stack &&
 (frame->pc == (unsigned long)return_to_handler)) {
+if (WARN_ON_ONCE(frame->graph == -1))
+return -EINVAL;
+if (frame->graph < -1)
+frame->graph += FTRACE_NOTRACE_DEPTH;
 /*
 * This is a case where function graph tracer has
 * modified a return address (LR) in a stack frame


@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
 if (end < start || flags)
 return -EINVAL;
-if (!access_ok(VERIFY_READ, start, end - start))
+if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
 return -EFAULT;
 return __do_compat_cache_op(start, end);


@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 frame.fp = regs->regs[29];
 frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-frame.graph = -1; /* no task info */
+frame.graph = current->curr_ret_stack;
 #endif
 do {
 int ret = unwind_frame(NULL, &frame);


@@ -57,7 +57,7 @@ static const char *handler[]= {
 "Error"
 };
-int show_unhandled_signals = 1;
+int show_unhandled_signals = 0;
 static void dump_backtrace_entry(unsigned long where)
 {
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
 }
 #endif
-if (show_unhandled_signals_ratelimited()) {
-pr_info("%s[%d]: syscall %d\n", current->comm,
-task_pid_nr(current), regs->syscallno);
-dump_instr("", regs);
-if (user_mode(regs))
-__show_regs(regs);
-}
 return sys_ni_syscall();
 }


@@ -933,6 +933,11 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
 pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
 pgprot_val(mk_sect_prot(prot)));
+/* ioremap_page_range doesn't honour BBM */
+if (pud_present(READ_ONCE(*pudp)))
+return 0;
 BUG_ON(phys & ~PUD_MASK);
 set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
 return 1;
@@ -942,6 +947,11 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
 pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
 pgprot_val(mk_sect_prot(prot)));
+/* ioremap_page_range doesn't honour BBM */
+if (pmd_present(READ_ONCE(*pmdp)))
+return 0;
 BUG_ON(phys & ~PMD_MASK);
 set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
 return 1;


@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 off = offsetof(struct bpf_array, map.max_entries);
 emit_a64_mov_i64(tmp, off, ctx);
 emit(A64_LDR32(tmp, r2, tmp), ctx);
+emit(A64_MOV(0, r3, r3), ctx);
 emit(A64_CMP(0, r3, tmp), ctx);
-emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 * goto out;
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 */
 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
 emit(A64_CMP(1, tcc, tmp), ctx);
-emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
 emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
 /* prog = array->ptrs[index];


@@ -44,18 +44,25 @@ struct bug_frame {
 * not be used like this with newer versions of gcc.
 */
 #define BUG() \
+do { \
 __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
 "movu.w " __stringify(__LINE__) ",$r0\n\t"\
 "jump 0f\n\t" \
 ".section .rodata\n" \
 "0:\t.string \"" __FILE__ "\"\n\t" \
-".previous")
+".previous"); \
+unreachable(); \
+} while (0)
 #endif
 #else
 /* This just causes an oops. */
-#define BUG() (*(int *)0 = 0)
+#define BUG() \
+do { \
+barrier_before_unreachable(); \
+__builtin_trap(); \
+} while (0)
 #endif


@@ -4,7 +4,11 @@
 #ifdef CONFIG_BUG
 #define ia64_abort() __builtin_trap()
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+#define BUG() do { \
+printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+barrier_before_unreachable(); \
+ia64_abort(); \
+} while (0)
 /* should this BUG be made generic? */
 #define HAVE_ARCH_BUG


@@ -8,16 +8,19 @@
 #ifndef CONFIG_SUN3
 #define BUG() do { \
 pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+barrier_before_unreachable(); \
 __builtin_trap(); \
 } while (0)
 #else
 #define BUG() do { \
 pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+barrier_before_unreachable(); \
 panic("BUG!"); \
 } while (0)
 #endif
 #else
 #define BUG() do { \
+barrier_before_unreachable(); \
 __builtin_trap(); \
 } while (0)
 #endif


@@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS
 quiet_cmd_cpp_its_S = ITS $@
 cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
+-D__ASSEMBLY__ \
 -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
 -DVMLINUX_BINARY="\"$(3)\"" \
 -DVMLINUX_COMPRESSION="\"$(2)\"" \


@@ -86,7 +86,6 @@ struct compat_flock {
 compat_off_t l_len;
 s32 l_sysid;
 compat_pid_t l_pid;
-short __unused;
 s32 pad[4];
 };


@@ -8,7 +8,6 @@ config RISCV
 select OF
 select OF_EARLY_FLATTREE
 select OF_IRQ
-select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 select ARCH_WANT_FRAME_POINTERS
 select CLONE_BACKWARDS
 select COMMON_CLK
@@ -20,7 +19,6 @@ config RISCV
 select GENERIC_STRNLEN_USER
 select GENERIC_SMP_IDLE_THREAD
 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
-select ARCH_WANT_OPTIONAL_GPIOLIB
 select HAVE_MEMBLOCK
 select HAVE_MEMBLOCK_NODE_MAP
 select HAVE_DMA_API_DEBUG
@@ -34,7 +32,6 @@ config RISCV
 select HAVE_ARCH_TRACEHOOK
 select MODULES_USE_ELF_RELA if MODULES
 select THREAD_INFO_IN_TASK
-select RISCV_IRQ_INTC
 select RISCV_TIMER
 config MMU


@@ -172,6 +172,9 @@ ENTRY(handle_exception)
 move a1, sp /* pt_regs */
 tail do_IRQ
 1:
+/* Exceptions run with interrupts enabled */
+csrs sstatus, SR_SIE
 /* Handle syscalls */
 li t0, EXC_SYSCALL
 beq s4, t0, handle_syscall
@@ -198,8 +201,6 @@ handle_syscall:
 */
 addi s2, s2, 0x4
 REG_S s2, PT_SEPC(sp)
-/* System calls run with interrupts enabled */
-csrs sstatus, SR_SIE
 /* Trace syscalls, but only if requested by the user. */
 REG_L t0, TASK_TI_FLAGS(tp)
 andi t0, t0, _TIF_SYSCALL_TRACE


@@ -64,7 +64,7 @@ ENTRY(_start)
 /* Start the kernel */
 mv a0, s0
 mv a1, s1
-call sbi_save
+call parse_dtb
 tail start_kernel
 relocate:


@@ -144,7 +144,7 @@ asmlinkage void __init setup_vm(void)
 #endif
 }
-void __init sbi_save(unsigned int hartid, void *dtb)
+void __init parse_dtb(unsigned int hartid, void *dtb)
 {
 early_init_dt_scan(__va(dtb));
 }


@@ -9,10 +9,14 @@
 void do_BUG(const char *file, int line);
 #define BUG() do { \
 do_BUG(__FILE__, __LINE__); \
+barrier_before_unreachable(); \
 __builtin_trap(); \
 } while (0)
 #else
-#define BUG() __builtin_trap()
+#define BUG() do { \
+barrier_before_unreachable(); \
+__builtin_trap(); \
+} while (0)
 #endif
 #define HAVE_ARCH_BUG


@@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
 }
 #endif /* __ASSEMBLY__ */
+/*
+* Below is used in the eBPF JIT compiler and emits the byte sequence
+* for the following assembly:
+*
+* With retpolines configured:
+*
+* callq do_rop
+* spec_trap:
+* pause
+* lfence
+* jmp spec_trap
+* do_rop:
+* mov %rax,(%rsp)
+* retq
+*
+* Without retpolines configured:
+*
+* jmp *%rax
+*/
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE 17
+# define RETPOLINE_RAX_BPF_JIT() \
+EMIT1_off32(0xE8, 7); /* callq do_rop */ \
+/* spec_trap: */ \
+EMIT2(0xF3, 0x90); /* pause */ \
+EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
+EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
+/* do_rop: */ \
+EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
+EMIT1(0xC3); /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE 2
+# define RETPOLINE_RAX_BPF_JIT() \
+EMIT2(0xFF, 0xE0); /* jmp *%rax */
+#endif
 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */


@@ -542,6 +542,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
 goto overflow;
 break;
 case R_X86_64_PC32:
+case R_X86_64_PLT32:
 value -= (u64)address;
 *(u32 *)location = value;
 break;


@@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 goto overflow;
 break;
 case R_X86_64_PC32:
+case R_X86_64_PLT32:
 if (*(u32 *)loc != 0)
 goto invalid_relocation;
 val -= (u64)loc;


@@ -13,6 +13,7 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
+#include <asm/nospec-branch.h>
 #include <linux/bpf.h>
 /*
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 EMIT2(0x89, 0xD2); /* mov edx, edx */
 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
 offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
 EMIT2(X86_JBE, OFFSET1); /* jbe out */
 label1 = cnt;
@@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 */
 EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 EMIT2(X86_JA, OFFSET2); /* ja out */
 label2 = cnt;
 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
@@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 * goto out;
 */
 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
 EMIT2(X86_JE, OFFSET3); /* je out */
 label3 = cnt;
@@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 * rdi == ctx (1st arg)
 * rax == prog->bpf_func + prologue_size
 */
-EMIT2(0xFF, 0xE0); /* jmp rax */
+RETPOLINE_RAX_BPF_JIT();
 /* out: */
 BUILD_BUG_ON(cnt - label1 != OFFSET1);


@@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 break;
 case R_X86_64_PC32:
+case R_X86_64_PLT32:
 /*
 * PC relative relocations don't need to be adjusted unless
 * referencing a percpu symbol.
+*
+* NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
 */
 if (is_percpu_sym(sym, symname))
 add_reloc(&relocs32neg, offset);


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "blacklist.h"
-const char __initdata *const blacklist_hashes[] = {
+const char __initconst *const blacklist_hashes[] = {
 NULL
 };


@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
 pr_devel("sinfo %u: Direct signer is key %x\n",
 sinfo->index, key_serial(key));
 x509 = NULL;
+sig = sinfo->sig;
 goto matched;
 }
 if (PTR_ERR(key) != -ENOKEY)


@@ -270,7 +270,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
 sinfo->index);
 return 0;
 }
-ret = public_key_verify_signature(p->pub, p->sig);
+ret = public_key_verify_signature(p->pub, x509->sig);
 if (ret < 0)
 return ret;
 x509->signer = p;
@@ -366,8 +366,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
 *
 * (*) -EBADMSG if some part of the message was invalid, or:
 *
-* (*) 0 if no signature chains were found to be blacklisted or to contain
-* unsupported crypto, or:
+* (*) 0 if a signature chain passed verification, or:
 *
 * (*) -EKEYREJECTED if a blacklisted key was encountered, or:
 *
@@ -423,8 +422,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7,
 for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
 ret = pkcs7_verify_one(pkcs7, sinfo);
-if (sinfo->blacklisted && actual_ret == -ENOPKG)
-actual_ret = -EKEYREJECTED;
+if (sinfo->blacklisted) {
+if (actual_ret == -ENOPKG)
+actual_ret = -EKEYREJECTED;
+continue;
+}
 if (ret < 0) {
 if (ret == -ENOPKG) {
 sinfo->unsupported_crypto = true;


@@ -79,9 +79,11 @@ int public_key_verify_signature(const struct public_key *pkey,
 BUG_ON(!pkey);
 BUG_ON(!sig);
-BUG_ON(!sig->digest);
 BUG_ON(!sig->s);
+if (!sig->digest)
+return -ENOPKG;
 alg_name = sig->pkey_algo;
 if (strcmp(sig->pkey_algo, "rsa") == 0) {
 /* The data wangled by the RSA algorithm is typically padded


@@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup);
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
 * matching parent certificate in the trusted list, -EKEYREJECTED if the
-* signature check fails or the key is blacklisted and some other error if
-* there is a matching certificate but the signature check cannot be performed.
+* signature check fails or the key is blacklisted, -ENOPKG if the signature
+* uses unsupported crypto, or some other error if there is a matching
+* certificate but the signature check cannot be performed.
 */
 int restrict_link_by_signature(struct key *dest_keyring,
 const struct key_type *type,
@@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring,
 return -EOPNOTSUPP;
 sig = payload->data[asym_auth];
+if (!sig)
+return -ENOPKG;
 if (!sig->auth_ids[0] && !sig->auth_ids[1])
 return -ENOKEY;
@@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring,
 return -EOPNOTSUPP;
 sig = payload->data[asym_auth];
+if (!sig)
+return -ENOPKG;
 if (!sig->auth_ids[0] && !sig->auth_ids[1])
 return -ENOKEY;
@@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring,
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if we
 * couldn't find a matching parent certificate in the trusted list,
-* -EKEYREJECTED if the signature check fails, and some other error if
-* there is a matching certificate but the signature check cannot be
-* performed.
+* -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+* unsupported crypto, or some other error if there is a matching certificate
+* but the signature check cannot be performed.
 */
 int restrict_link_by_key_or_keyring(struct key *dest_keyring,
 const struct key_type *type,
@@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if we
 * couldn't find a matching parent certificate in the trusted list,
-* -EKEYREJECTED if the signature check fails, and some other error if
-* there is a matching certificate but the signature check cannot be
-* performed.
+* -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+* unsupported crypto, or some other error if there is a matching certificate
+* but the signature check cannot be performed.
 */
 int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
 const struct key_type *type,


@@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 &target_thread->reply_error.work);
 wake_up_interruptible(&target_thread->wait);
 } else {
-WARN(1, "Unexpected reply error: %u\n",
-target_thread->reply_error.cmd);
+/*
+* Cannot get here for normal operation, but
+* we can if multiple synchronous transactions
+* are sent without blocking for responses.
+* Just ignore the 2nd error in this case.
+*/
+pr_warn("Unexpected reply error: %u\n",
+target_thread->reply_error.cmd);
 }
 binder_inner_proc_unlock(target_thread->proc);
 binder_thread_dec_tmpref(target_thread);
@@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 int debug_id = buffer->debug_id;
 binder_debug(BINDER_DEBUG_TRANSACTION,
-"%d buffer release %d, size %zd-%zd, failed at %p\n",
+"%d buffer release %d, size %zd-%zd, failed at %pK\n",
 proc->pid, buffer->debug_id,
 buffer->data_size, buffer->offsets_size, failed_at);
@@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc,
 }
 }
 binder_debug(BINDER_DEBUG_DEAD_BINDER,
-"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+"%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
 proc->pid, thread->pid, (u64)cookie,
 death);
 if (death == NULL) {
@@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc,
 binder_inner_proc_unlock(thread->proc);
+/*
+* This is needed to avoid races between wake_up_poll() above and
+* and ep_remove_waitqueue() called for other reasons (eg the epoll file
+* descriptor being closed); ep_remove_waitqueue() holds an RCU read
+* lock, so we can be sure it's done after calling synchronize_rcu().
+*/
+if (thread->looper & BINDER_LOOPER_STATE_POLL)
+synchronize_rcu();
 if (send_reply)
 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
 binder_release_work(proc, &thread->todo);
@@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp,
 bool wait_for_proc_work;
 thread = binder_get_thread(proc);
+if (!thread)
+return POLLERR;
 binder_inner_proc_lock(thread->proc);
 thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 spin_lock(&t->lock);
 to_proc = t->to_proc;
 seq_printf(m,
-"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
 prefix, t->debug_id, t,
 t->from ? t->from->proc->pid : 0,
 t->from ? t->from->pid : 0,
@@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 }
 if (buffer->target_node)
 seq_printf(m, " node %d", buffer->target_node->debug_id);
-seq_printf(m, " size %zd:%zd data %p\n",
+seq_printf(m, " size %zd:%zd data %pK\n",
 buffer->data_size, buffer->offsets_size,
 buffer->data);
 }


@@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 uint32_t aes_control;
 unsigned long flags;
 int err;
+u8 *iv;
 aes_control = SSS_AES_KEY_CHANGE_MODE;
 if (mode & FLAGS_AES_DECRYPT)
 aes_control |= SSS_AES_MODE_DECRYPT;
-if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
 aes_control |= SSS_AES_CHAIN_MODE_CBC;
-else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+iv = req->info;
+} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
 aes_control |= SSS_AES_CHAIN_MODE_CTR;
+iv = req->info;
+} else {
+iv = NULL; /* AES_ECB */
+}
 if (dev->ctx->keylen == AES_KEYSIZE_192)
 aes_control |= SSS_AES_KEY_SIZE_192;
@@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 goto outdata_error;
 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
-s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
 s5p_set_dma_indata(dev, dev->sg_src);
 s5p_set_dma_outdata(dev, dev->sg_dst);


@@ -1,7 +1,6 @@
 /*
 * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
 *
-* Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
 * Copyright (C) 2015 Intel Corporation
 * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
 *
@@ -98,15 +97,13 @@ struct axp288_extcon_info {
 struct device *dev;
 struct regmap *regmap;
 struct regmap_irq_chip_data *regmap_irqc;
-struct delayed_work det_work;
 int irq[EXTCON_IRQ_END];
 struct extcon_dev *edev;
 unsigned int previous_cable;
-bool first_detect_done;
 };
 /* Power up/down reason string array */
-static char *axp288_pwr_up_down_info[] = {
+static const char * const axp288_pwr_up_down_info[] = {
 "Last wake caused by user pressing the power button",
 "Last wake caused by a charger insertion",
 "Last wake caused by a battery insertion",
@@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = {
 */
 static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
 {
-char **rsi;
+const char * const *rsi;
 unsigned int val, i, clear_mask = 0;
 int ret;
@@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
 regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
 }
-static void axp288_chrg_detect_complete(struct axp288_extcon_info *info)
-{
-/*
-* We depend on other drivers to do things like mux the data lines,
-* enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has
-* not set these things up correctly resulting in the initial charger
-* cable type detection giving a wrong result and we end up not charging
-* or charging at only 0.5A.
-*
-* So we schedule a second cable type detection after 2 seconds to
-* give the other drivers time to load and do their thing.
-*/
-if (!info->first_detect_done) {
-queue_delayed_work(system_wq, &info->det_work,
-msecs_to_jiffies(2000));
-info->first_detect_done = true;
-}
-}
 static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 {
 int ret, stat, cfg, pwr_stat;
@@ -223,8 +201,6 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 info->previous_cable = cable;
 }
-axp288_chrg_detect_complete(info);
 return 0;
 dev_det_ret:
@@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
 return IRQ_HANDLED;
 }
-static void axp288_extcon_det_work(struct work_struct *work)
+static void axp288_extcon_enable(struct axp288_extcon_info *info)
 {
-struct axp288_extcon_info *info =
-container_of(work, struct axp288_extcon_info, det_work.work);
 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
 BC_GLOBAL_RUN, 0);
 /* Enable the charger detection logic */
@@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 info->regmap = axp20x->regmap;
 info->regmap_irqc = axp20x->regmap_irqc;
 info->previous_cable = EXTCON_NONE;
-INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work);
 platform_set_drvdata(pdev, info);
@@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 }
 /* Start charger cable type detection */
-queue_delayed_work(system_wq, &info->det_work, 0);
+axp288_extcon_enable(info);
 return 0;
 }


@@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev)
 return ret;
 }
-/* queue initial processing of id-pin */
+/* process id-pin so that we start with the right status */
 queue_delayed_work(system_wq, &data->work, 0);
+flush_delayed_work(&data->work);
 platform_set_drvdata(pdev, data);


@@ -736,9 +736,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
 enum drm_connector_status ret = connector_status_disconnected;
 int r;
-r = pm_runtime_get_sync(connector->dev->dev);
-if (r < 0)
-return connector_status_disconnected;
+if (!drm_kms_helper_is_poll_worker()) {
+r = pm_runtime_get_sync(connector->dev->dev);
+if (r < 0)
+return connector_status_disconnected;
+}
 if (encoder) {
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -757,8 +759,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
 /* check acpi lid status ??? */
 amdgpu_connector_update_scratch_regs(connector, ret);
-pm_runtime_mark_last_busy(connector->dev->dev);
-pm_runtime_put_autosuspend(connector->dev->dev);
+if (!drm_kms_helper_is_poll_worker()) {
+pm_runtime_mark_last_busy(connector->dev->dev);
+pm_runtime_put_autosuspend(connector->dev->dev);
+}
 return ret;
 }
@@ -868,9 +874,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
 enum drm_connector_status ret = connector_status_disconnected;
 int r;
-r = pm_runtime_get_sync(connector->dev->dev);
-if (r < 0)
-return connector_status_disconnected;
+if (!drm_kms_helper_is_poll_worker()) {
+r = pm_runtime_get_sync(connector->dev->dev);
+if (r < 0)
+return connector_status_disconnected;
+}
 encoder = amdgpu_connector_best_single_encoder(connector);
 if (!encoder)
@@ -924,8 +932,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
 amdgpu_connector_update_scratch_regs(connector, ret);
 out:
-pm_runtime_mark_last_busy(connector->dev->dev);
-pm_runtime_put_autosuspend(connector->dev->dev);
+if (!drm_kms_helper_is_poll_worker()) {
+pm_runtime_mark_last_busy(connector->dev->dev);
+pm_runtime_put_autosuspend(connector->dev->dev);
+}
 return ret;
 }
@@ -988,9 +998,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 enum drm_connector_status ret = connector_status_disconnected;
 bool dret = false, broken_edid = false;
-r = pm_runtime_get_sync(connector->dev->dev);
-if (r < 0)
-return connector_status_disconnected;
+if (!drm_kms_helper_is_poll_worker()) {
+r = pm_runtime_get_sync(connector->dev->dev);
+if (r < 0)
+return connector_status_disconnected;
+}
 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
 ret = connector->status;
@@ -1115,8 +1127,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 amdgpu_connector_update_scratch_regs(connector, ret);
 exit:
-pm_runtime_mark_last_busy(connector->dev->dev);
-pm_runtime_put_autosuspend(connector->dev->dev);
+if (!drm_kms_helper_is_poll_worker()) {
+pm_runtime_mark_last_busy(connector->dev->dev);
+pm_runtime_put_autosuspend(connector->dev->dev);
+}
 return ret;
 }
@@ -1359,9 +1373,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
 int r;
-r = pm_runtime_get_sync(connector->dev->dev);
-if (r < 0)
-return connector_status_disconnected;
+if (!drm_kms_helper_is_poll_worker()) {
+r = pm_runtime_get_sync(connector->dev->dev);
+if (r < 0)
+return connector_status_disconnected;
+}
 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
 ret = connector->status;
@@ -1429,8 +1445,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 amdgpu_connector_update_scratch_regs(connector, ret);
 out:
-pm_runtime_mark_last_busy(connector->dev->dev);
-pm_runtime_put_autosuspend(connector->dev->dev);
+if (!drm_kms_helper_is_poll_worker()) {
+pm_runtime_mark_last_busy(connector->dev->dev);
+pm_runtime_put_autosuspend(connector->dev->dev);
+}
 return ret;
 }


@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)
 {
 }
-/*
-* This is called after a mode is programmed. It should reverse anything done
-* by the prepare function
-*/
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
-}
-/*
-* The core can pass us a set of gamma values to program. We actually only
-* use this for 8-bit mode so can't perform smooth fades on deeper modes,
-* but it's a requirement that we provide the function
-*/
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-u16 *blue, uint32_t size,
-struct drm_modeset_acquire_ctx *ctx)
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
 {
 struct drm_device *dev = crtc->dev;
 struct cirrus_device *cdev = dev->dev_private;
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 int i;
 if (!crtc->enabled)
-return 0;
+return;
 r = crtc->gamma_store;
 g = r + crtc->gamma_size;
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 WREG8(PALETTE_DATA, *g++ >> 8);
 WREG8(PALETTE_DATA, *b++ >> 8);
 }
+}
+/*
+* This is called after a mode is programmed. It should reverse anything done
+* by the prepare function
+*/
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+cirrus_crtc_load_lut(crtc);
+}
+/*
+* The core can pass us a set of gamma values to program. We actually only
+* use this for 8-bit mode so can't perform smooth fades on deeper modes,
+* but it's a requirement that we provide the function
+*/
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+u16 *blue, uint32_t size,
+struct drm_modeset_acquire_ctx *ctx)
+{
+cirrus_crtc_load_lut(crtc);
 return 0;
 }


@@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
 new_crtc_state->event->base.completion = &commit->flip_done;
 new_crtc_state->event->base.completion_release = release_crtc_commit;
 drm_crtc_commit_get(commit);
+commit->abort_completion = true;
 }
 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
@@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
 {
 if (state->commit) {
+/*
+* In the event that a non-blocking commit returns
+* -ERESTARTSYS before the commit_tail work is queued, we will
+* have an extra reference to the commit object. Release it, if
+* the event has not been consumed by the worker.
+*
+* state->event may be freed, so we can't directly look at
+* state->event->base.completion.
+*/
+if (state->event && state->commit->abort_completion)
+drm_crtc_commit_put(state->commit);
 kfree(state->commit->event);
 state->commit->event = NULL;
 drm_crtc_commit_put(state->commit);
 }


@@ -113,6 +113,9 @@ static const struct edid_quirk {
 /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
 { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
 /* Belinea 10 15 55 */
 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -162,6 +165,24 @@ static const struct edid_quirk {
 /* HTC Vive VR Headset */
 { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
+{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+/* Windows Mixed Reality Headsets */
+{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+{ "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
+{ "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
+{ "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
+{ "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
+{ "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+{ "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
+{ "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+/* Sony PlayStation VR Headset */
+{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
 };
 /*


@@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
 if (!mm->color_adjust)
 return NULL;
-hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
-hole_start = __drm_mm_hole_node_start(hole);
-hole_end = hole_start + hole->hole_size;
+/*
+* The hole found during scanning should ideally be the first element
+* in the hole_stack list, but due to side-effects in the driver it
+* may not be.
+*/
+list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
+hole_start = __drm_mm_hole_node_start(hole);
+hole_end = hole_start + hole->hole_size;
+if (hole_start <= scan->hit_start &&
+hole_end >= scan->hit_end)
+break;
+}
+/* We should only be called after we found the hole previously */
+DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
+if (unlikely(&hole->hole_stack == &mm->hole_stack))
+return NULL;
 DRM_MM_BUG_ON(hole_start > scan->hit_start);
 DRM_MM_BUG_ON(hole_end < scan->hit_end);


@@ -653,6 +653,26 @@ static void output_poll_execute(struct work_struct *work)
 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
+/**
+* drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+*
+* Determine if %current task is an output poll worker. This can be used
+* to select distinct code paths for output polling versus other contexts.
+*
+* One use case is to avoid a deadlock between the output poll worker and
+* the autosuspend worker wherein the latter waits for polling to finish
+* upon calling drm_kms_helper_poll_disable(), while the former waits for
+* runtime suspend to finish upon calling pm_runtime_get_sync() in a
+* connector ->detect hook.
+*/
+bool drm_kms_helper_is_poll_worker(void)
+{
+struct work_struct *work = current_work();
+return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
 /**
 * drm_kms_helper_poll_disable - disable output polling
 * @dev: drm_device


@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
 if (!node) {
-dev_err(dev, "failed to allocate memory\n");
 ret = -ENOMEM;
 goto err;
 }
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
 struct drm_device *drm_dev = g2d->subdrv.drm_dev;
 struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
 struct drm_exynos_pending_g2d_event *e;
-struct timeval now;
+struct timespec64 now;
 if (list_empty(&runqueue_node->event_list))
 return;
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
 e = list_first_entry(&runqueue_node->event_list,
 struct drm_exynos_pending_g2d_event, base.link);
-do_gettimeofday(&now);
+ktime_get_ts64(&now);
 e->event.tv_sec = now.tv_sec;
-e->event.tv_usec = now.tv_usec;
+e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
 e->event.cmdlist_no = cmdlist_no;
 drm_send_event(drm_dev, &e->base);
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 return -EFAULT;
 runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
-if (!runqueue_node) {
-dev_err(dev, "failed to allocate memory\n");
+if (!runqueue_node)
 return -ENOMEM;
-}
 run_cmdlist = &runqueue_node->run_cmdlist;
 event_list = &runqueue_node->event_list;
 INIT_LIST_HEAD(run_cmdlist);


@@ -1,19 +0,0 @@
-/*
-* Copyright (c) 2012 Samsung Electronics Co., Ltd.
-*
-* Authors:
-* YoungJun Cho <yj44.cho@samsung.com>
-* Eunchul Kim <chulspro.kim@samsung.com>
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the
-* Free Software Foundation; either version 2 of the License, or (at your
-* option) any later version.
-*/
-#ifndef _EXYNOS_DRM_ROTATOR_H_
-#define _EXYNOS_DRM_ROTATOR_H_
-/* TODO */
-#endif


@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)
 /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
 | HDMI_I2S_SEL_LRCK(6));
-hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
-| HDMI_I2S_SEL_SDATA2(4));
+hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
+| HDMI_I2S_SEL_SDATA0(4));
 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
 | HDMI_I2S_SEL_SDATA2(2));
 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
 /* I2S_CON_1 & 2 */


@@ -569,7 +569,7 @@
 #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
 #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
 #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
 /* Real input DMA size register */
 #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)


@@ -464,7 +464,7 @@
 /* I2S_PIN_SEL_1 */
 #define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
-#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+#define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7)
 /* I2S_PIN_SEL_2 */
 #define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)


@@ -36,6 +36,7 @@
 #include "meson_venc.h"
 #include "meson_vpp.h"
 #include "meson_viu.h"
+#include "meson_canvas.h"
 #include "meson_registers.h"
 /* CRTC definition */
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
 } else
 meson_vpp_disable_interlace_vscaler_osd1(priv);
+meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+priv->viu.osd1_addr, priv->viu.osd1_stride,
+priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+MESON_CANVAS_BLKMODE_LINEAR);
 /* Enable OSD1 */
 writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
 priv->io_base + _REG(VPP_MISC));


@ -43,6 +43,9 @@ struct meson_drm {
bool osd1_commit; bool osd1_commit;
uint32_t osd1_ctrl_stat; uint32_t osd1_ctrl_stat;
uint32_t osd1_blk0_cfg[5]; uint32_t osd1_blk0_cfg[5];
uint32_t osd1_addr;
uint32_t osd1_stride;
uint32_t osd1_height;
} viu; } viu;
struct { struct {


@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Update Canvas with buffer address */ /* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0); gem = drm_fb_cma_get_gem_obj(fb, 0);
meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, priv->viu.osd1_addr = gem->paddr;
gem->paddr, fb->pitches[0], priv->viu.osd1_stride = fb->pitches[0];
fb->height, MESON_CANVAS_WRAP_NONE, priv->viu.osd1_height = fb->height;
MESON_CANVAS_BLKMODE_LINEAR);
spin_unlock_irqrestore(&priv->drm->event_lock, flags); spin_unlock_irqrestore(&priv->drm->event_lock, flags);
} }


@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL; nv_connector->edid = NULL;
} }
ret = pm_runtime_get_sync(connector->dev->dev); /* Outputs are only polled while runtime active, so acquiring a
if (ret < 0 && ret != -EACCES) * runtime PM ref here is unnecessary (and would deadlock upon
return conn_status; * runtime suspend because it waits for polling to finish).
*/
if (!drm_kms_helper_is_poll_worker()) {
ret = pm_runtime_get_sync(connector->dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
nv_encoder = nouveau_connector_ddc_detect(connector); nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@ -647,8 +653,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
out: out:
pm_runtime_mark_last_busy(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_put_autosuspend(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return conn_status; return conn_status;
} }


@ -899,9 +899,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected; enum drm_connector_status ret = connector_status_disconnected;
int r; int r;
r = pm_runtime_get_sync(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
if (r < 0) r = pm_runtime_get_sync(connector->dev->dev);
return connector_status_disconnected; if (r < 0)
return connector_status_disconnected;
}
if (encoder) { if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@ -924,8 +926,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */ /* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret); radeon_connector_update_scratch_regs(connector, ret);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret; return ret;
} }
@ -1039,9 +1045,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected; enum drm_connector_status ret = connector_status_disconnected;
int r; int r;
r = pm_runtime_get_sync(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
if (r < 0) r = pm_runtime_get_sync(connector->dev->dev);
return connector_status_disconnected; if (r < 0)
return connector_status_disconnected;
}
encoder = radeon_best_single_encoder(connector); encoder = radeon_best_single_encoder(connector);
if (!encoder) if (!encoder)
@ -1108,8 +1116,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret); radeon_connector_update_scratch_regs(connector, ret);
out: out:
pm_runtime_mark_last_busy(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_put_autosuspend(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret; return ret;
} }
@ -1173,9 +1183,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect) if (!radeon_connector->dac_load_detect)
return ret; return ret;
r = pm_runtime_get_sync(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
if (r < 0) r = pm_runtime_get_sync(connector->dev->dev);
return connector_status_disconnected; if (r < 0)
return connector_status_disconnected;
}
encoder = radeon_best_single_encoder(connector); encoder = radeon_best_single_encoder(connector);
if (!encoder) if (!encoder)
@ -1187,8 +1199,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected) if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret); radeon_connector_update_scratch_regs(connector, ret);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret; return ret;
} }
@ -1251,9 +1267,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected; enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false; bool dret = false, broken_edid = false;
r = pm_runtime_get_sync(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
if (r < 0) r = pm_runtime_get_sync(connector->dev->dev);
return connector_status_disconnected; if (r < 0)
return connector_status_disconnected;
}
if (radeon_connector->detected_hpd_without_ddc) { if (radeon_connector->detected_hpd_without_ddc) {
force = true; force = true;
@ -1436,8 +1454,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
} }
exit: exit:
pm_runtime_mark_last_busy(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_put_autosuspend(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret; return ret;
} }
@ -1688,9 +1708,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dig_connector->is_mst) if (radeon_dig_connector->is_mst)
return connector_status_disconnected; return connector_status_disconnected;
r = pm_runtime_get_sync(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
if (r < 0) r = pm_runtime_get_sync(connector->dev->dev);
return connector_status_disconnected; if (r < 0)
return connector_status_disconnected;
}
if (!force && radeon_check_hpd_status_unchanged(connector)) { if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status; ret = connector->status;
@ -1777,8 +1799,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
} }
out: out:
pm_runtime_mark_last_busy(connector->dev->dev); if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_put_autosuspend(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret; return ret;
} }


@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
{ {
struct ipu_soc *ipu = irq_desc_get_handler_data(desc); struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip *chip = irq_desc_get_chip(desc);
const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
chained_irq_enter(chip, desc); chained_irq_enter(chip, desc);
@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc)
{ {
struct ipu_soc *ipu = irq_desc_get_handler_data(desc); struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip *chip = irq_desc_get_chip(desc);
const int int_reg[] = { 4, 5, 8, 9}; static const int int_reg[] = { 4, 5, 8, 9};
chained_irq_enter(chip, desc); chained_irq_enter(chip, desc);


@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8: case V4L2_PIX_FMT_SRGGB8:
case V4L2_PIX_FMT_GREY:
offset = image->rect.left + image->rect.top * pix->bytesperline; offset = image->rect.left + image->rect.top * pix->bytesperline;
break; break;
case V4L2_PIX_FMT_SBGGR16: case V4L2_PIX_FMT_SBGGR16:
case V4L2_PIX_FMT_SGBRG16: case V4L2_PIX_FMT_SGBRG16:
case V4L2_PIX_FMT_SGRBG16: case V4L2_PIX_FMT_SGRBG16:
case V4L2_PIX_FMT_SRGGB16: case V4L2_PIX_FMT_SRGGB16:
case V4L2_PIX_FMT_Y16:
offset = image->rect.left * 2 + offset = image->rect.left * 2 +
image->rect.top * pix->bytesperline; image->rect.top * pix->bytesperline;
break; break;


@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10:
case MEDIA_BUS_FMT_Y10_1X10:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10; cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_10; cfg->data_width = IPU_CSI_DATA_WIDTH_10;
@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12:
case MEDIA_BUS_FMT_Y12_1X12:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW12; cfg->mipi_dt = MIPI_DT_RAW12;
cfg->data_width = IPU_CSI_DATA_WIDTH_12; cfg->data_width = IPU_CSI_DATA_WIDTH_12;


@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
if (pre_node == pre->dev->of_node) { if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex); mutex_unlock(&ipu_pre_list_mutex);
device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
of_node_put(pre_node);
return pre; return pre;
} }
} }
mutex_unlock(&ipu_pre_list_mutex); mutex_unlock(&ipu_pre_list_mutex);
of_node_put(pre_node);
return NULL; return NULL;
} }


@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
mutex_unlock(&ipu_prg_list_mutex); mutex_unlock(&ipu_prg_list_mutex);
device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
prg->id = ipu_id; prg->id = ipu_id;
of_node_put(prg_node);
return prg; return prg;
} }
} }
mutex_unlock(&ipu_prg_list_mutex); mutex_unlock(&ipu_prg_list_mutex);
of_node_put(prg_node);
return NULL; return NULL;
} }


@ -645,6 +645,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
#define USB_DEVICE_ID_LD_JWM 0x1080 #define USB_DEVICE_ID_LD_JWM 0x1080
#define USB_DEVICE_ID_LD_DMMP 0x1081 #define USB_DEVICE_ID_LD_DMMP 0x1081
#define USB_DEVICE_ID_LD_UMIP 0x1090 #define USB_DEVICE_ID_LD_UMIP 0x1090


@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },


@ -123,8 +123,10 @@ config I2C_I801
Wildcat Point (PCH) Wildcat Point (PCH)
Wildcat Point-LP (PCH) Wildcat Point-LP (PCH)
BayTrail (SOC) BayTrail (SOC)
Braswell (SOC)
Sunrise Point-H (PCH) Sunrise Point-H (PCH)
Sunrise Point-LP (PCH) Sunrise Point-LP (PCH)
Kaby Lake-H (PCH)
DNV (SOC) DNV (SOC)
Broxton (SOC) Broxton (SOC)
Lewisburg (PCH) Lewisburg (PCH)


@ -50,6 +50,9 @@
#define BCM2835_I2C_S_CLKT BIT(9) #define BCM2835_I2C_S_CLKT BIT(9)
#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
#define BCM2835_I2C_FEDL_SHIFT 16
#define BCM2835_I2C_REDL_SHIFT 0
#define BCM2835_I2C_CDIV_MIN 0x0002 #define BCM2835_I2C_CDIV_MIN 0x0002
#define BCM2835_I2C_CDIV_MAX 0xFFFE #define BCM2835_I2C_CDIV_MAX 0xFFFE
@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
{ {
u32 divider; u32 divider, redl, fedl;
divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
i2c_dev->bus_clk_rate); i2c_dev->bus_clk_rate);
@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
/*
* Number of core clocks to wait after falling edge before
* outputting the next data bit. Note that both FEDL and REDL
* can't be greater than CDIV/2.
*/
fedl = max(divider / 16, 1u);
/*
* Number of core clocks to wait after rising edge before
* sampling the next incoming data bit.
*/
redl = max(divider / 4, 1u);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL,
(fedl << BCM2835_I2C_FEDL_SHIFT) |
(redl << BCM2835_I2C_REDL_SHIFT));
return 0; return 0;
} }
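The FEDL/REDL values are derived purely from the divider; a small userspace sketch of the arithmetic, assuming an illustrative 150 MHz core clock and a 100 kHz bus (numbers not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long core_clk = 150000000UL;	/* assumed core clock */
	unsigned long bus_clk  = 100000UL;	/* assumed I2C bus rate */

	unsigned int divider = DIV_ROUND_UP(core_clk, bus_clk);	/* 1500 */
	unsigned int fedl = MAX(divider / 16, 1u);			/* 93  */
	unsigned int redl = MAX(divider / 4, 1u);			/* 375 */

	/* Both delays stay below divider / 2 (750), as the comment requires. */
	printf("divider=%u fedl=%u redl=%u\n", divider, fedl, redl);
	return 0;
}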


@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
i2c_dw_disable_int(dev); i2c_dw_disable_int(dev);
/* Enable the adapter */ /* Enable the adapter */
__i2c_dw_enable(dev, true); __i2c_dw_enable_and_wait(dev, true);
/* Clear and enable interrupts */ /* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR); dw_readl(dev, DW_IC_CLR_INTR);
@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH); gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);
if (IS_ERR(gpio)) { if (IS_ERR(gpio)) {
r = PTR_ERR(gpio); r = PTR_ERR(gpio);
if (r == -ENOENT) if (r == -ENOENT || r == -ENOSYS)
return 0; return 0;
return r; return r;
} }


@ -58,6 +58,7 @@
* Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes
* Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
* BayTrail (SOC) 0x0f12 32 hard yes yes yes * BayTrail (SOC) 0x0f12 32 hard yes yes yes
* Braswell (SOC) 0x2292 32 hard yes yes yes
* Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes
* Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
* DNV (SOC) 0x19df 32 hard yes yes yes * DNV (SOC) 0x19df 32 hard yes yes yes


@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adap); platform_set_drvdata(pdev, adap);
init_completion(&siic->done); init_completion(&siic->done);
/* Controller Initalisation */ /* Controller initialisation */
writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL); writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
* but they start to affect the speed when clock is set to faster * but they start to affect the speed when clock is set to faster
* frequencies. * frequencies.
* Through the actual tests, use the different user_div value(which * Through the actual tests, use the different user_div value(which
* in the divider formular 'Fio / (Fi2c * user_div)') to adapt * in the divider formula 'Fio / (Fi2c * user_div)') to adapt
* the different ranges of i2c bus clock frequency, to make the SCL * the different ranges of i2c bus clock frequency, to make the SCL
* more accurate. * more accurate.
*/ */


@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
ASPEED_ADC_INIT_POLLING_TIME, ASPEED_ADC_INIT_POLLING_TIME,
ASPEED_ADC_INIT_TIMEOUT); ASPEED_ADC_INIT_TIMEOUT);
if (ret) if (ret)
goto scaler_error; goto poll_timeout_error;
} }
/* Start all channels in normal mode. */ /* Start all channels in normal mode. */
@ -274,9 +274,10 @@ static int aspeed_adc_probe(struct platform_device *pdev)
writel(ASPEED_OPERATION_MODE_POWER_DOWN, writel(ASPEED_OPERATION_MODE_POWER_DOWN,
data->base + ASPEED_REG_ENGINE_CONTROL); data->base + ASPEED_REG_ENGINE_CONTROL);
clk_disable_unprepare(data->clk_scaler->clk); clk_disable_unprepare(data->clk_scaler->clk);
reset_error:
reset_control_assert(data->rst);
clk_enable_error: clk_enable_error:
poll_timeout_error:
reset_control_assert(data->rst);
reset_error:
clk_hw_unregister_divider(data->clk_scaler); clk_hw_unregister_divider(data->clk_scaler);
scaler_error: scaler_error:
clk_hw_unregister_divider(data->clk_prescaler); clk_hw_unregister_divider(data->clk_prescaler);
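The relabelled error path above restores the usual "unwind in reverse order of setup" pattern; a minimal standalone sketch of that pattern, with made-up resource names:

#include <stdio.h>

static int setup_clock(void)    { puts("clock ready");    return 0; }
static int setup_reset(void)    { puts("reset released"); return 0; }
static int setup_channels(void) { return -1; /* pretend this step fails */ }
static void undo_reset(void)    { puts("reset asserted"); }
static void undo_clock(void)    { puts("clock disabled"); }

static int probe(void)
{
	int ret;

	ret = setup_clock();
	if (ret)
		return ret;
	ret = setup_reset();
	if (ret)
		goto err_clock;
	ret = setup_channels();
	if (ret)
		goto err_reset;		/* only unwind what was actually set up */
	return 0;

err_reset:
	undo_reset();
err_clock:
	undo_clock();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}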


@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
int ret; int ret;
u32 val; u32 val;
/* Clear ADRDY by writing one, then enable ADC */
stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
/* Poll for ADRDY to be set (after adc startup time) */ /* Poll for ADRDY to be set (after adc startup time) */
@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
val & STM32H7_ADRDY, val & STM32H7_ADRDY,
100, STM32_ADC_TIMEOUT_US); 100, STM32_ADC_TIMEOUT_US);
if (ret) { if (ret) {
stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
dev_err(&indio_dev->dev, "Failed to enable ADC\n"); dev_err(&indio_dev->dev, "Failed to enable ADC\n");
} else {
/* Clear ADRDY by writing one */
stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
} }
return ret; return ret;


@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL) if (adis->trig == NULL)
return -ENOMEM; return -ENOMEM;
adis->trig->dev.parent = &adis->spi->dev;
adis->trig->ops = &adis_trigger_ops;
iio_trigger_set_drvdata(adis->trig, adis);
ret = request_irq(adis->spi->irq, ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll, &iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING, IRQF_TRIGGER_RISING,
@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (ret) if (ret)
goto error_free_trig; goto error_free_trig;
adis->trig->dev.parent = &adis->spi->dev;
adis->trig->ops = &adis_trigger_ops;
iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig); ret = iio_trigger_register(adis->trig);
indio_dev->trig = iio_trigger_get(adis->trig); indio_dev->trig = iio_trigger_get(adis->trig);


@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data; struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer; struct iio_buffer *rb = indio_dev->buffer;
if (!indio_dev->info) if (!indio_dev->info || rb == NULL)
return 0; return 0;
poll_wait(filp, &rb->pollq, wait); poll_wait(filp, &rb->pollq, wait);


@ -68,6 +68,8 @@ config SX9500
config SRF08 config SRF08
tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor"
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
depends on I2C depends on I2C
help help
Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 Say Y here to build a driver for Devantech SRF02/SRF08/SRF10


@ -305,16 +305,21 @@ void nldev_exit(void);
static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
struct ib_pd *pd, struct ib_pd *pd,
struct ib_qp_init_attr *attr, struct ib_qp_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata,
struct ib_uobject *uobj)
{ {
struct ib_qp *qp; struct ib_qp *qp;
if (!dev->create_qp)
return ERR_PTR(-EOPNOTSUPP);
qp = dev->create_qp(pd, attr, udata); qp = dev->create_qp(pd, attr, udata);
if (IS_ERR(qp)) if (IS_ERR(qp))
return qp; return qp;
qp->device = dev; qp->device = dev;
qp->pd = pd; qp->pd = pd;
qp->uobject = uobj;
/* /*
* We don't track XRC QPs for now, because they don't have PD * We don't track XRC QPs for now, because they don't have PD
* and more importantly they are created internaly by driver, * and more importantly they are created internaly by driver,


@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
*/ */
uobj->context = context; uobj->context = context;
uobj->type = type; uobj->type = type;
atomic_set(&uobj->usecnt, 0); /*
* Allocated objects start out as write locked to deny any other
* syscalls from accessing them until they are committed. See
* rdma_alloc_commit_uobject
*/
atomic_set(&uobj->usecnt, -1);
kref_init(&uobj->ref); kref_init(&uobj->ref);
return uobj; return uobj;
@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
goto free; goto free;
} }
uverbs_uobject_get(uobj); /*
* The idr_find is guaranteed to return a pointer to something that
* isn't freed yet, or NULL, as the free after idr_remove goes through
* kfree_rcu(). However the object may still have been released and
* kfree() could be called at any time.
*/
if (!kref_get_unless_zero(&uobj->ref))
uobj = ERR_PTR(-ENOENT);
free: free:
rcu_read_unlock(); rcu_read_unlock();
return uobj; return uobj;
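The comment above hinges on taking a reference only while the count is still non-zero; a simplified userspace model of that "get unless zero" step, using a C11 atomic in place of the kernel's struct kref (sketch only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;	/* 0 means the object is already being freed */
};

static bool get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		/* Try old -> old + 1; on failure 'old' is reloaded and we retry. */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;
	}
	return false;		/* count already hit zero: the lookup must fail */
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	printf("%d\n", get_unless_zero(&o));	/* 1: count goes 1 -> 2 */
	atomic_store(&o.refcount, 0);		/* pretend the last reference was dropped */
	printf("%d\n", get_unless_zero(&o));	/* 0: object looks dead, lookup fails */
	return 0;
}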
@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
return ret; return ret;
} }
static void lockdep_check(struct ib_uobject *uobj, bool exclusive) static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
{ {
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
if (exclusive) if (exclusive)
WARN_ON(atomic_read(&uobj->usecnt) > 0); WARN_ON(atomic_read(&uobj->usecnt) != -1);
else else
WARN_ON(atomic_read(&uobj->usecnt) == -1); WARN_ON(atomic_read(&uobj->usecnt) <= 0);
#endif #endif
} }
@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0; return 0;
} }
lockdep_check(uobj, true); assert_uverbs_usecnt(uobj, true);
ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
up_read(&ucontext->cleanup_rwsem); up_read(&ucontext->cleanup_rwsem);
@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0; return 0;
} }
lockdep_check(uobject, true); assert_uverbs_usecnt(uobject, true);
ret = uobject->type->type_class->remove_commit(uobject, ret = uobject->type->type_class->remove_commit(uobject,
RDMA_REMOVE_DESTROY); RDMA_REMOVE_DESTROY);
if (ret) if (ret)
return ret; goto out;
uobject->type = &null_obj_type; uobject->type = &null_obj_type;
out:
up_read(&ucontext->cleanup_rwsem); up_read(&ucontext->cleanup_rwsem);
return 0; return ret;
} }
static void alloc_commit_idr_uobject(struct ib_uobject *uobj) static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
return ret; return ret;
} }
/* matches atomic_set(-1) in alloc_uobj */
assert_uverbs_usecnt(uobj, true);
atomic_set(&uobj->usecnt, 0);
uobj->type->type_class->alloc_commit(uobj); uobj->type->type_class->alloc_commit(uobj);
up_read(&uobj->context->cleanup_rwsem); up_read(&uobj->context->cleanup_rwsem);
@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{ {
lockdep_check(uobj, exclusive); assert_uverbs_usecnt(uobj, exclusive);
uobj->type->type_class->lookup_put(uobj, exclusive); uobj->type->type_class->lookup_put(uobj, exclusive);
/* /*
* In order to unlock an object, either decrease its usecnt for * In order to unlock an object, either decrease its usecnt for


@ -7,7 +7,6 @@
#include <rdma/restrack.h> #include <rdma/restrack.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/sched/task.h> #include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h> #include <linux/pid_namespace.h>
void rdma_restrack_init(struct rdma_restrack_root *res) void rdma_restrack_init(struct rdma_restrack_root *res)
@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
{ {
enum rdma_restrack_type type = res->type; enum rdma_restrack_type type = res->type;
struct ib_device *dev; struct ib_device *dev;
struct ib_xrcd *xrcd;
struct ib_pd *pd; struct ib_pd *pd;
struct ib_cq *cq; struct ib_cq *cq;
struct ib_qp *qp; struct ib_qp *qp;
@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
qp = container_of(res, struct ib_qp, res); qp = container_of(res, struct ib_qp, res);
dev = qp->device; dev = qp->device;
break; break;
case RDMA_RESTRACK_XRCD:
xrcd = container_of(res, struct ib_xrcd, res);
dev = xrcd->device;
break;
default: default:
WARN_ONCE(true, "Wrong resource tracking type %u\n", type); WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
return NULL; return NULL;
@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
return dev; return dev;
} }
static bool res_is_user(struct rdma_restrack_entry *res)
{
switch (res->type) {
case RDMA_RESTRACK_PD:
return container_of(res, struct ib_pd, res)->uobject;
case RDMA_RESTRACK_CQ:
return container_of(res, struct ib_cq, res)->uobject;
case RDMA_RESTRACK_QP:
return container_of(res, struct ib_qp, res)->uobject;
default:
WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
return false;
}
}
void rdma_restrack_add(struct rdma_restrack_entry *res) void rdma_restrack_add(struct rdma_restrack_entry *res)
{ {
struct ib_device *dev = res_to_dev(res); struct ib_device *dev = res_to_dev(res);
@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
if (!dev) if (!dev)
return; return;
if (!uaccess_kernel()) { if (res_is_user(res)) {
get_task_struct(current); get_task_struct(current);
res->task = current; res->task = current;
res->kern_name = NULL; res->kern_name = NULL;


@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
if (f.file) if (f.file)
fdput(f); fdput(f);
mutex_unlock(&file->device->xrcd_tree_mutex);
uobj_alloc_commit(&obj->uobject); uobj_alloc_commit(&obj->uobject);
mutex_unlock(&file->device->xrcd_tree_mutex);
return in_len; return in_len;
err_copy: err_copy:
@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
file->ucontext); file->ucontext);
if (IS_ERR(uobj)) { if (IS_ERR(uobj))
mutex_unlock(&file->device->xrcd_tree_mutex);
return PTR_ERR(uobj); return PTR_ERR(uobj);
}
ret = uobj_remove_commit(uobj); ret = uobj_remove_commit(uobj);
return ret ?: in_len; return ret ?: in_len;
@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
struct ib_uverbs_ex_create_cq_resp resp; struct ib_uverbs_ex_create_cq_resp resp;
struct ib_cq_init_attr attr = {}; struct ib_cq_init_attr attr = {};
if (!ib_dev->create_cq)
return ERR_PTR(-EOPNOTSUPP);
if (cmd->comp_vector >= file->device->num_comp_vectors) if (cmd->comp_vector >= file->device->num_comp_vectors)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
resp.response_length = offsetof(typeof(resp), response_length) + resp.response_length = offsetof(typeof(resp), response_length) +
sizeof(resp.response_length); sizeof(resp.response_length);
cq->res.type = RDMA_RESTRACK_CQ;
rdma_restrack_add(&cq->res);
ret = cb(file, obj, &resp, ucore, context); ret = cb(file, obj, &resp, ucore, context);
if (ret) if (ret)
goto err_cb; goto err_cb;
uobj_alloc_commit(&obj->uobject); uobj_alloc_commit(&obj->uobject);
cq->res.type = RDMA_RESTRACK_CQ;
rdma_restrack_add(&cq->res);
return obj; return obj;
err_cb: err_cb:
@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT) if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr); qp = ib_create_qp(pd, &attr);
else else
qp = _ib_create_qp(device, pd, &attr, uhw); qp = _ib_create_qp(device, pd, &attr, uhw,
&obj->uevent.uobject);
if (IS_ERR(qp)) { if (IS_ERR(qp)) {
ret = PTR_ERR(qp); ret = PTR_ERR(qp);
@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
atomic_inc(&attr.srq->usecnt); atomic_inc(&attr.srq->usecnt);
if (ind_tbl) if (ind_tbl)
atomic_inc(&ind_tbl->usecnt); atomic_inc(&ind_tbl->usecnt);
} else {
/* It is done in _ib_create_qp for other QP types */
qp->uobject = &obj->uevent.uobject;
} }
qp->uobject = &obj->uevent.uobject;
obj->uevent.uobject.object = qp; obj->uevent.uobject.object = qp;
@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp; goto release_qp;
} }
if ((cmd->base.attr_mask & IB_QP_AV) &&
!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
ret = -EINVAL;
goto release_qp;
}
if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
!rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
!rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
ret = -EINVAL; ret = -EINVAL;
goto release_qp; goto release_qp;
} }
@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
wq_init_attr.create_flags = cmd.create_flags; wq_init_attr.create_flags = cmd.create_flags;
obj->uevent.events_reported = 0; obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list); INIT_LIST_HEAD(&obj->uevent.event_list);
if (!pd->device->create_wq) {
err = -EOPNOTSUPP;
goto err_put_cq;
}
wq = pd->device->create_wq(pd, &wq_init_attr, uhw); wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
if (IS_ERR(wq)) { if (IS_ERR(wq)) {
err = PTR_ERR(wq); err = PTR_ERR(wq);
@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
wq_attr.flags = cmd.flags; wq_attr.flags = cmd.flags;
wq_attr.flags_mask = cmd.flags_mask; wq_attr.flags_mask = cmd.flags_mask;
} }
if (!wq->device->modify_wq) {
ret = -EOPNOTSUPP;
goto out;
}
ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
uobj_put_obj_read(wq); uobj_put_obj_read(wq);
return ret; return ret;
} }
@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
init_attr.ind_tbl = wqs; init_attr.ind_tbl = wqs;
if (!ib_dev->create_rwq_ind_table) {
err = -EOPNOTSUPP;
goto err_uobj;
}
rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
if (IS_ERR(rwq_ind_tbl)) { if (IS_ERR(rwq_ind_tbl)) {
@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
struct ib_device_attr attr = {0}; struct ib_device_attr attr = {0};
int err; int err;
if (!ib_dev->query_device)
return -EOPNOTSUPP;
if (ucore->inlen < sizeof(cmd)) if (ucore->inlen < sizeof(cmd))
return -EINVAL; return -EINVAL;


@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
return 0; return 0;
} }
if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
return -EINVAL;
spec = &attr_spec_bucket->attrs[attr_id]; spec = &attr_spec_bucket->attrs[attr_id];
e = &elements[attr_id]; e = &elements[attr_id];
e->uattr = uattr_ptr; e->uattr = uattr_ptr;


@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
short min = SHRT_MAX; short min = SHRT_MAX;
const void *elem; const void *elem;
int i, j, last_stored = -1; int i, j, last_stored = -1;
unsigned int equal_min = 0;
for_each_element(elem, i, j, elements, num_elements, num_offset, for_each_element(elem, i, j, elements, num_elements, num_offset,
data_offset) { data_offset) {
@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,
*/ */
iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
last_stored = i; last_stored = i;
if (min == GET_ID(id))
equal_min++;
else
equal_min = 1;
min = GET_ID(id); min = GET_ID(id);
} }
@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,
* Therefore, we need to clean the beginning of the array to make sure * Therefore, we need to clean the beginning of the array to make sure
* all ids of final elements are equal to min. * all ids of final elements are equal to min.
*/ */
for (i = num_iters - 1; i >= 0 && memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
;
num_iters -= i + 1;
memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
*min_id = min; *min_id = min;
return num_iters; return equal_min;
} }
#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
hash = kzalloc(sizeof(*hash) + hash = kzalloc(sizeof(*hash) +
ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
sizeof(long)) + sizeof(long)) +
BITS_TO_LONGS(attr_max_bucket) * sizeof(long), BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
GFP_KERNEL); GFP_KERNEL);
if (!hash) { if (!hash) {
res = -ENOMEM; res = -ENOMEM;
@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
* first handler which != NULL. This also defines the * first handler which != NULL. This also defines the
* set of flags used for this handler. * set of flags used for this handler.
*/ */
for (i = num_object_defs - 1; for (i = num_method_defs - 1;
i >= 0 && !method_defs[i]->handler; i--) i >= 0 && !method_defs[i]->handler; i--)
; ;
hash->methods[min_id++] = method; hash->methods[min_id++] = method;


@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
return -1; return -1;
} }
static bool verify_command_idx(u32 command, bool extended)
{
if (extended)
return command < ARRAY_SIZE(uverbs_ex_cmd_table);
return command < ARRAY_SIZE(uverbs_cmd_table);
}
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos) size_t count, loff_t *pos)
{ {
struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_file *file = filp->private_data;
struct ib_device *ib_dev; struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr; struct ib_uverbs_cmd_hdr hdr;
bool extended_command;
__u32 command; __u32 command;
__u32 flags; __u32 flags;
int srcu_key; int srcu_key;
@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
} }
command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
flags = (hdr.command &
IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
if (!verify_command_idx(command, extended_command)) {
ret = -EINVAL;
goto out;
}
if (verify_command_mask(ib_dev, command)) { if (verify_command_mask(ib_dev, command)) {
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto out; goto out;
@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out; goto out;
} }
flags = (hdr.command &
IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
if (!flags) { if (!flags) {
if (command >= ARRAY_SIZE(uverbs_cmd_table) || if (!uverbs_cmd_table[command]) {
!uverbs_cmd_table[command]) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
struct ib_udata uhw; struct ib_udata uhw;
size_t written_count = count; size_t written_count = count;
if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || if (!uverbs_ex_cmd_table[command]) {
!uverbs_ex_cmd_table[command]) {
ret = -ENOSYS; ret = -ENOSYS;
goto out; goto out;
} }
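The new verify_command_idx() check validates the index before either dispatch table is touched; a standalone sketch of that bounds-then-NULL ordering, with a hypothetical command table:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

typedef int (*cmd_handler)(void);

static int cmd_hello(void) { return 0; }

static cmd_handler cmd_table[] = { cmd_hello, NULL /* reserved, unimplemented */ };

static int dispatch(unsigned int command)
{
	/* Range check first: indexing the table with a bogus command is
	 * undefined behaviour, so it must never reach the NULL check. */
	if (command >= ARRAY_SIZE(cmd_table))
		return -22;	/* stands in for -EINVAL */
	if (!cmd_table[command])
		return -22;
	return cmd_table[command]();
}

int main(void)
{
	printf("%d %d %d\n", dispatch(0), dispatch(1), dispatch(99));
	return 0;
}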
@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {
.llseek = no_llseek, .llseek = no_llseek,
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
.unlocked_ioctl = ib_uverbs_ioctl, .unlocked_ioctl = ib_uverbs_ioctl,
.compat_ioctl = ib_uverbs_ioctl,
#endif #endif
}; };
@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {
.llseek = no_llseek, .llseek = no_llseek,
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
.unlocked_ioctl = ib_uverbs_ioctl, .unlocked_ioctl = ib_uverbs_ioctl,
.compat_ioctl = ib_uverbs_ioctl,
#endif #endif
}; };


@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
uverbs_attr_get(ctx, UVERBS_UHW_OUT); uverbs_attr_get(ctx, UVERBS_UHW_OUT);
if (!IS_ERR(uhw_in)) { if (!IS_ERR(uhw_in)) {
udata->inbuf = uhw_in->ptr_attr.ptr;
udata->inlen = uhw_in->ptr_attr.len; udata->inlen = uhw_in->ptr_attr.len;
if (uverbs_attr_ptr_is_inline(uhw_in))
udata->inbuf = &uhw_in->uattr->data;
else
udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
} else { } else {
udata->inbuf = NULL; udata->inbuf = NULL;
udata->inlen = 0; udata->inlen = 0;
} }
if (!IS_ERR(uhw_out)) { if (!IS_ERR(uhw_out)) {
udata->outbuf = uhw_out->ptr_attr.ptr; udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
udata->outlen = uhw_out->ptr_attr.len; udata->outlen = uhw_out->ptr_attr.len;
} else { } else {
udata->outbuf = NULL; udata->outbuf = NULL;
@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
cq->res.type = RDMA_RESTRACK_CQ; cq->res.type = RDMA_RESTRACK_CQ;
rdma_restrack_add(&cq->res); rdma_restrack_add(&cq->res);
ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe); ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
sizeof(cq->cqe));
if (ret) if (ret)
goto err_cq; goto err_cq;
@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
resp.comp_events_reported = obj->comp_events_reported; resp.comp_events_reported = obj->comp_events_reported;
resp.async_events_reported = obj->async_events_reported; resp.async_events_reported = obj->async_events_reported;
return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp); return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
} }
static DECLARE_UVERBS_METHOD( static DECLARE_UVERBS_METHOD(


@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs) if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr); rdma_rw_init_qp(device, qp_init_attr);
qp = _ib_create_qp(device, pd, qp_init_attr, NULL); qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
if (IS_ERR(qp)) if (IS_ERR(qp))
return qp; return qp;
@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
} }
qp->real_qp = qp; qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type; qp->qp_type = qp_init_attr->qp_type;
qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;


@ -120,7 +120,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3 #define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5 #define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_TASK_IN_PROG 6
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev; struct net_device *netdev;
unsigned int version, major, minor; unsigned int version, major, minor;
@ -158,6 +157,7 @@ struct bnxt_re_dev {
atomic_t srq_count; atomic_t srq_count;
atomic_t mr_count; atomic_t mr_count;
atomic_t mw_count; atomic_t mw_count;
atomic_t sched_count;
/* Max of 2 lossless traffic class supported per port */ /* Max of 2 lossless traffic class supported per port */
u16 cosq[2]; u16 cosq[2];

View File

@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_pd = dev_attr->max_pd; ib_attr->max_pd = dev_attr->max_pd;
ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
if (dev_attr->is_atomic) { ib_attr->atomic_cap = IB_ATOMIC_NONE;
ib_attr->atomic_cap = IB_ATOMIC_HCA; ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
}
ib_attr->max_ee_rd_atom = 0; ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0; ib_attr->max_res_rd_atom = 0;
@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
return 0; return 0;
} }
static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
unsigned long flags;
spin_lock_irqsave(&qp->scq->cq_lock, flags);
if (qp->rcq != qp->scq)
spin_lock(&qp->rcq->cq_lock);
else
__acquire(&qp->rcq->cq_lock);
return flags;
}
static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
unsigned long flags)
__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
if (qp->rcq != qp->scq)
spin_unlock(&qp->rcq->cq_lock);
else
__release(&qp->rcq->cq_lock);
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
/* Queue Pairs */ /* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp) int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{ {
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev; struct bnxt_re_dev *rdev = qp->rdev;
int rc; int rc;
unsigned int flags;
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) { if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
return rc; return rc;
} }
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_clean_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
&rdev->sqp_ah->qplib_ah); &rdev->sqp_ah->qplib_ah);
@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc; return rc;
} }
bnxt_qplib_del_flush_qp(&qp->qplib_qp); bnxt_qplib_clean_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp); &rdev->qp1_sqp->qplib_qp);
if (rc) { if (rc) {
@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
goto fail; goto fail;
} }
qp->qplib_qp.scq = &cq->qplib_cq; qp->qplib_qp.scq = &cq->qplib_cq;
qp->scq = cq;
} }
if (qp_init_attr->recv_cq) { if (qp_init_attr->recv_cq) {
@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
goto fail; goto fail;
} }
qp->qplib_qp.rcq = &cq->qplib_cq; qp->qplib_qp.rcq = &cq->qplib_cq;
qp->rcq = cq;
} }
if (qp_init_attr->srq) { if (qp_init_attr->srq) {
@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) { if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
goto fail; goto free_umem;
} }
} }
@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
return &qp->ib_qp; return &qp->ib_qp;
qp_destroy: qp_destroy:
bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
if (udata) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)
ib_umem_release(qp->sumem);
}
fail: fail:
kfree(qp); kfree(qp);
return ERR_PTR(rc); return ERR_PTR(rc);
@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
dev_dbg(rdev_to_dev(rdev), dev_dbg(rdev_to_dev(rdev),
"Move QP = %p out of flush list\n", "Move QP = %p out of flush list\n",
qp); qp);
bnxt_qplib_del_flush_qp(&qp->qplib_qp); bnxt_qplib_clean_qp(&qp->qplib_qp);
} }
} }
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
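The bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() helpers added above only take the second lock when the two CQs differ; a userspace sketch of the same idea, with pthread mutexes standing in for the kernel spinlocks:

#include <pthread.h>
#include <stdio.h>

struct cq {
	pthread_mutex_t lock;
};

static void lock_cqs(struct cq *scq, struct cq *rcq)
{
	pthread_mutex_lock(&scq->lock);
	if (rcq != scq)			/* a shared CQ must not be locked twice */
		pthread_mutex_lock(&rcq->lock);
}

static void unlock_cqs(struct cq *scq, struct cq *rcq)
{
	if (rcq != scq)
		pthread_mutex_unlock(&rcq->lock);
	pthread_mutex_unlock(&scq->lock);
}

int main(void)
{
	struct cq shared = { PTHREAD_MUTEX_INITIALIZER };

	/* Same CQ used for send and receive: only one lock/unlock happens. */
	lock_cqs(&shared, &shared);
	unlock_cqs(&shared, &shared);
	puts("ok");
	return 0;
}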


@ -89,6 +89,8 @@ struct bnxt_re_qp {
/* QP1 */ /* QP1 */
u32 send_psn; u32 send_psn;
struct ib_ud_header qp1_hdr; struct ib_ud_header qp1_hdr;
struct bnxt_re_cq *scq;
struct bnxt_re_cq *rcq;
}; };
struct bnxt_re_cq { struct bnxt_re_cq {


@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
mutex_unlock(&bnxt_re_dev_lock); mutex_unlock(&bnxt_re_dev_lock);
synchronize_rcu(); synchronize_rcu();
flush_workqueue(bnxt_re_wq);
ib_dealloc_device(&rdev->ibdev); ib_dealloc_device(&rdev->ibdev);
/* rdev is gone */ /* rdev is gone */
@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work)
break; break;
} }
smp_mb__before_atomic(); smp_mb__before_atomic();
clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); atomic_dec(&rdev->sched_count);
kfree(re_work); kfree(re_work);
} }
@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
/* netdev notifier will call NETDEV_UNREGISTER again later since /* netdev notifier will call NETDEV_UNREGISTER again later since
* we are still holding the reference to the netdev * we are still holding the reference to the netdev
*/ */
if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) if (atomic_read(&rdev->sched_count) > 0)
goto exit; goto exit;
bnxt_re_ib_unreg(rdev, false); bnxt_re_ib_unreg(rdev, false);
bnxt_re_remove_one(rdev); bnxt_re_remove_one(rdev);
@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
re_work->vlan_dev = (real_dev == netdev ? re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev); NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task); INIT_WORK(&re_work->work, bnxt_re_task);
set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); atomic_inc(&rdev->sched_count);
queue_work(bnxt_re_wq, &re_work->work); queue_work(bnxt_re_wq, &re_work->work);
} }
} }
@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void)
*/ */
list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device"); dev_info(rdev_to_dev(rdev), "Unregistering Device");
/*
* Flush out any scheduled tasks before destroying the
* resources
*/
flush_workqueue(bnxt_re_wq);
bnxt_re_dev_stop(rdev); bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true); bnxt_re_ib_unreg(rdev, true);
bnxt_re_remove_one(rdev); bnxt_re_remove_one(rdev);


@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
} }
} }
void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{ {
unsigned long flags; unsigned long flags;
@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req; struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp resp; struct creq_destroy_qp_resp resp;
unsigned long flags;
u16 cmd_flags = 0; u16 cmd_flags = 0;
int rc; int rc;
@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
return rc; return rc;
} }
/* Must walk the associated CQs to nullified the QP ptr */ return 0;
spin_lock_irqsave(&qp->scq->hwq.lock, flags); }
__clean_cq(qp->scq, (u64)(unsigned long)qp);
if (qp->rcq && qp->rcq != qp->scq) {
spin_lock(&qp->rcq->hwq.lock);
__clean_cq(qp->rcq, (u64)(unsigned long)qp);
spin_unlock(&qp->rcq->hwq.lock);
}
spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
{
bnxt_qplib_free_qp_hdr_buf(res, qp); bnxt_qplib_free_qp_hdr_buf(res, qp);
bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
kfree(qp->sq.swq); kfree(qp->sq.swq);
@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
if (qp->orrq.max_elements) if (qp->orrq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &qp->orrq); bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
return 0;
} }
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,


@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge); struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags); unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,


@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
/* Device */ /* Device */
static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
int rc;
u16 pcie_ctl2;
rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
&pcie_ctl2);
if (rc)
return false;
return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
char *fw_ver) char *fw_ver)
{ {
@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
} }
attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); attr->is_atomic = 0;
bail: bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc; return rc;


@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp; union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_cq *cmd = &req.create_cq; struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
struct pvrdma_create_cq_resp cq_resp = {0};
struct pvrdma_create_cq ucmd; struct pvrdma_create_cq ucmd;
BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
cq->ibcq.cqe = resp->cqe; cq->ibcq.cqe = resp->cqe;
cq->cq_handle = resp->cq_handle; cq->cq_handle = resp->cq_handle;
cq_resp.cqn = resp->cq_handle;
spin_lock_irqsave(&dev->cq_tbl_lock, flags); spin_lock_irqsave(&dev->cq_tbl_lock, flags);
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
cq->uar = &(to_vucontext(context)->uar); cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */ /* Copy udata back. */
if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
dev_warn(&dev->pdev->dev, dev_warn(&dev->pdev->dev,
"failed to copy back udata\n"); "failed to copy back udata\n");
pvrdma_destroy_cq(&cq->ibcq); pvrdma_destroy_cq(&cq->ibcq);


@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
union pvrdma_cmd_resp rsp; union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_srq *cmd = &req.create_srq; struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
struct pvrdma_create_srq_resp srq_resp = {0};
struct pvrdma_create_srq ucmd; struct pvrdma_create_srq ucmd;
unsigned long flags; unsigned long flags;
int ret; int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
         }
 
         srq->srq_handle = resp->srqn;
+        srq_resp.srqn = resp->srqn;
         spin_lock_irqsave(&dev->srq_tbl_lock, flags);
         dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
         spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
         /* Copy udata back. */
-        if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+        if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
                 dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
                 pvrdma_destroy_srq(&srq->ibsrq);
                 return ERR_PTR(-EINVAL);

View File

@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
         union pvrdma_cmd_resp rsp;
         struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
         struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+        struct pvrdma_alloc_pd_resp pd_resp = {0};
         int ret;
         void *ptr;
 
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
         pd->privileged = !context;
         pd->pd_handle = resp->pd_handle;
         pd->pdn = resp->pd_handle;
+        pd_resp.pdn = resp->pd_handle;
 
         if (context) {
-                if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+                if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
                         dev_warn(&dev->pdev->dev,
                                  "failed to copy back protection domain\n");
                         pvrdma_dealloc_pd(&pd->ibpd);

View File

@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
-        WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
-        WARN_ONCE(!priv->path_dentry, "null path debug file\n");
         debugfs_remove(priv->mcg_dentry);
         debugfs_remove(priv->path_dentry);
         priv->mcg_dentry = priv->path_dentry = NULL;

View File

@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg)
                                 __FILE__, __LINE__, iocnum);
                 return -ENODEV;
         }
+        if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+                return -EINVAL;
 
         dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
                 ioc->name));

View File

@@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
                 goto out;
         }
 
-        if (bus->dev_state == MEI_DEV_POWER_DOWN) {
-                dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n");
-                err = 0;
-                goto out;
-        }
-
         err = mei_cl_disconnect(cl);
         if (err < 0)
                 dev_err(bus->dev, "Could not disconnect from the ME client\n");

View File

@@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl)
                 return 0;
         }
 
+        if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
+                mei_cl_set_disconnected(cl);
+                return 0;
+        }
+
         rets = pm_runtime_get(dev->dev);
         if (rets < 0 && rets != -EINPROGRESS) {
                 pm_runtime_put_noidle(dev->dev);

View File

@@ -132,6 +132,11 @@
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
 #define MEI_DEV_ID_KBP_2      0xA2BB  /* Kaby Point 2 */
 
+#define MEI_DEV_ID_CNP_LP     0x9DE0  /* Cannon Point LP */
+#define MEI_DEV_ID_CNP_LP_4   0x9DE4  /* Cannon Point LP 4 (iTouch) */
+#define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
+#define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
+
 /*
  * MEI HW Section
  */

Some files were not shown because too many files have changed in this diff.