Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2016-03-10 10:28:27 +01:00
commit 6cbe9e4a22
315 changed files with 2906 additions and 2255 deletions


@ -56,10 +56,6 @@
<entry><constant>MEDIA_ENT_F_CONN_COMPOSITE</constant></entry>
<entry>Connector for a RGB composite signal.</entry>
</row>
-<row>
-<entry><constant>MEDIA_ENT_F_CONN_TEST</constant></entry>
-<entry>Connector for a test generator.</entry>
-</row>
<row>
<entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry>
<entry>Camera video sensor entity.</entry>


@ -400,3 +400,7 @@ wm8350_wdt:
nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter)
-------------------------------------------------
sun4v_wdt:
timeout_ms: Watchdog timeout in milliseconds (1..180000, default=60000)
nowayout: Watchdog cannot be stopped once started
-------------------------------------------------


@ -4518,6 +4518,12 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
FREESCALE GPMI NAND DRIVER
M: Han Xu <han.xu@nxp.com>
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/nand/gpmi-nand/*
FREESCALE I2C CPM DRIVER
M: Jochen Friedrich <jochen@scram.de>
L: linuxppc-dev@lists.ozlabs.org
@ -4534,7 +4540,7 @@ F: include/linux/platform_data/video-imxfb.h
F: drivers/video/fbdev/imxfb.c
FREESCALE QUAD SPI DRIVER
-M: Han Xu <han.xu@freescale.com>
+M: Han Xu <han.xu@nxp.com>
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/spi-nor/fsl-quadspi.c
@ -4548,6 +4554,15 @@ S: Maintained
F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE IMX / MXC FEC DRIVER
M: Fugang Duan <fugang.duan@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/fec_main.c
F: drivers/net/ethernet/freescale/fec_ptp.c
F: drivers/net/ethernet/freescale/fec.h
F: Documentation/devicetree/bindings/net/fsl-fec.txt
FREESCALE QUICC ENGINE LIBRARY
L: linuxppc-dev@lists.ozlabs.org
S: Orphan
@ -6764,6 +6779,7 @@ S: Maintained
F: Documentation/networking/mac80211-injection.txt
F: include/net/mac80211.h
F: net/mac80211/
F: drivers/net/wireless/mac80211_hwsim.[ch]
MACVLAN DRIVER
M: Patrick McHardy <kaber@trash.net>


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Blurry Fish Butt
# *DOCUMENTATION*


@ -195,5 +195,7 @@ CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
$(call cmd,shipped)


@ -283,7 +283,6 @@ &hsusb {
pinctrl-names = "default";
status = "okay";
-renesas,enable-gpio = <&gpio5 31 GPIO_ACTIVE_HIGH>;
};
&usbphy {


@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
ifeq ($(CONFIG_ARM_PSCI),y)
obj-$(CONFIG_SMP) += psci_smp.o
endif


@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
-return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_core_regs(void)
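
This hunk, like the matching arm64 KVM and MIPS KVM hunks further down, fixes the same bug class: copy_to_user()/copy_from_user() return the number of bytes left uncopied, never a negative errno, so returning their result directly leaks a positive byte count to callers that expect 0 or -errno. A minimal sketch of the corrected pattern (helper name invented, kernel context assumed):

/* Sketch only -- not part of the patch. */
static int put_reg_to_user(void __user *uaddr, const u64 *val, size_t size)
{
	if (copy_to_user(uaddr, val, size))
		return -EFAULT;	/* some bytes were not copied */
	return 0;		/* full copy succeeded */
}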


@ -49,6 +49,9 @@ static int change_memory_common(unsigned long addr, int numpages,
WARN_ON_ONCE(1);
}
if (!numpages)
return 0;
if (start < MODULES_VADDR || start >= MODULES_END)
return -EINVAL;


@ -34,13 +34,13 @@
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*
-* VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+* VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
* VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
* fixed mappings and modules
*/
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
#ifndef CONFIG_KASAN
#define VMALLOC_START (VA_START)
@ -51,7 +51,8 @@
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
-#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START (VMALLOC_END + SZ_64K)
+#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
#define FIRST_USER_ADDRESS 0UL
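
The halved shift works because only the linear mapping, which occupies half of the arm64 kernel address space, ever needs struct page coverage. A rough userspace check of the arithmetic, with illustrative stand-in values for VA_BITS, PAGE_SHIFT and sizeof(struct page):

#include <stdio.h>

#define VA_BITS		48	/* illustrative, configuration-dependent */
#define PAGE_SHIFT	12
#define SIZEOF_PAGE	64UL	/* a typical sizeof(struct page) */

int main(void)
{
	unsigned long whole = (1UL << (VA_BITS - PAGE_SHIFT)) * SIZEOF_PAGE;
	unsigned long half  = (1UL << (VA_BITS - PAGE_SHIFT - 1)) * SIZEOF_PAGE;

	/* Covering the whole VA space would double the vmemmap reservation. */
	printf("whole VA space: %lu GiB, linear region: %lu GiB\n",
	       whole >> 30, half >> 30);
	return 0;
}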


@ -145,6 +145,10 @@ ENTRY(cpu_resume_mmu)
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
#ifdef CONFIG_KASAN
mov x0, sp
bl kasan_unpoison_remaining_stack
#endif
mov x0, #0 // return zero on success
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]


@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
-return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
/**


@ -319,8 +319,8 @@ void __init mem_init(void)
#endif
MLG(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-MLG((unsigned long)vmemmap,
-(unsigned long)vmemmap + VMEMMAP_SIZE),
+MLG(VMEMMAP_START,
+VMEMMAP_START + VMEMMAP_SIZE),
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif


@ -270,7 +270,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
}
EXPORT_SYMBOL(jz_gpio_port_get_value);
-#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
+#define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f)
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{


@ -125,7 +125,7 @@ LEAF(_restore_fp_context)
END(_restore_fp_context)
.set reorder
-.type fault@function
+.type fault, @function
.ent fault
fault: li v0, -EFAULT
jr ra


@ -358,7 +358,7 @@ LEAF(_restore_msa_all_upper)
.set reorder
-.type fault@function
+.type fault, @function
.ent fault
fault: li v0, -EFAULT # failure
jr ra


@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
-siginfo_t info;
+siginfo_t info = {
+.si_signo = SIGFPE,
+.si_code = FPE_INTOVF,
+.si_addr = (void __user *)regs->cp0_epc,
+};
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
-info.si_code = FPE_INTOVF;
-info.si_signo = SIGFPE;
-info.si_errno = 0;
-info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state);
}
@ -874,7 +874,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
-siginfo_t info;
+siginfo_t info = { 0 };
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
-info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
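
Both hunks in this file rely on C99 designated initializers: members not named in the initializer list, si_errno among them, are implicitly zero-initialized, which is why the explicit si_errno = 0 assignments can be dropped. A self-contained illustration of that guarantee (struct is invented):

#include <assert.h>

struct fake_siginfo {
	int	signo;
	int	errno_;	/* deliberately left out below */
	int	code;
	void	*addr;	/* deliberately left out below */
};

int main(void)
{
	/* Per C99 6.7.8, unnamed members of a partially initialized
	 * object are zero-initialized. */
	struct fake_siginfo info = { .signo = 8, .code = 6 };

	assert(info.errno_ == 0);
	assert(info.addr == (void *)0);
	return 0;
}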


@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
-return copy_to_user(uaddr, vs, 16);
+return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
-return copy_from_user(vs, uaddr, 16);
+return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}


@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
-c->scache.sets = 64 << sets;
+if (sets)
+c->scache.sets = 64 << sets;
line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
-c->scache.linesz = 2 << line_sz;
+if (line_sz)
+c->scache.linesz = 2 << line_sz;
assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize);
-c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+if (c->scache.linesz) {
+c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+return 1;
+}
-return 1;
+return 0;
}
static inline int __init mips_sc_probe(void)


@ -33,7 +33,7 @@
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
-(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
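
The only change here is the parentheses around vdma. With the actual argument use_virtual_dma & 1 the old expansion happened to stay correct, since ! binds tighter than &, but an unparenthesized macro parameter is a latent precedence trap for any other argument. A toy demonstration:

#include <assert.h>

#define IS_ZERO_BAD(v)	(!v)	/* parameter used without parentheses */
#define IS_ZERO_GOOD(v)	(!(v))

int main(void)
{
	int a = 0, b = 1;

	/* IS_ZERO_BAD(a | b) expands to !a | b, i.e. (!a) | b == 1,
	 * while the intended !(a | b) is 0. */
	assert(IS_ZERO_BAD(a | b) == 1);
	assert(IS_ZERO_GOOD(a | b) == 0);
	return 0;
}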


@ -361,8 +361,9 @@
#define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344)
#define __NR_mlock2 (__NR_Linux + 345)
+#define __NR_copy_file_range (__NR_Linux + 346)
-#define __NR_Linux_syscalls (__NR_mlock2 + 1)
+#define __NR_Linux_syscalls (__NR_copy_file_range + 1)
#define __IGNORE_select /* newselect */


@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
-long ret = 0;
/* Do the secure computing check first. */
secure_computing_strict(regs->gr[20]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-tracehook_report_syscall_entry(regs))
-ret = -1L;
+tracehook_report_syscall_entry(regs)) {
+/*
+ * Tracing decided this syscall should not happen or the
+ * debugger stored an invalid system call number. Skip
+ * the system call and the system call restart handling.
+ */
+regs->gr[20] = -1UL;
+goto out;
+}
#ifdef CONFIG_64BIT
if (!is_compat_task())
@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
-return ret ? : regs->gr[20];
+out:
+return regs->gr[20];
}
void do_syscall_trace_exit(struct pt_regs *regs)


@ -343,7 +343,7 @@ tracesys_next:
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
-b,n .Lsyscall_nosys
+b,n .Ltracesys_nosys
LDREGX %r20(%r19), %r19
@ -359,6 +359,9 @@ tracesys_next:
be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2
.Ltracesys_nosys:
ldo -ENOSYS(%r0),%r28 /* set errno */
/* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */


@ -441,6 +441,7 @@
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */
ENTRY_SAME(copy_file_range)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))


@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* If the breakpoint is unregistered between a hw_breakpoint_handler()
* and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers.
* FIXME, this should not be using bp->ctx at all! Sayeth peterz.
*/
-if (bp->ctx && bp->ctx->task)
+if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
bp->ctx->task->thread.last_hit_ubp = NULL;
}


@ -8,6 +8,8 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/mmu.h>
#ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64
static inline int tlb1_next(void)
@ -60,6 +62,14 @@ static inline void book3e_tlb_lock(void)
unsigned long tmp;
int token = smp_processor_id() + 1;
/*
* Besides being unnecessary in the absence of SMT, this
* check prevents trying to do lbarx/stbcx. on e5500 which
* doesn't implement either feature.
*/
if (!cpu_has_feature(CPU_FTR_SMT))
return;
asm volatile("1: lbarx %0, 0, %1;"
"cmpwi %0, 0;"
"bne 2f;"
@ -80,6 +90,9 @@ static inline void book3e_tlb_unlock(void)
{
struct paca_struct *paca = get_paca();
if (!cpu_has_feature(CPU_FTR_SMT))
return;
isync();
paca->tcd_ptr->lock = 0;
}


@ -24,7 +24,13 @@ LDFLAGS := -m elf32_sparc
export BITS := 32
UTS_MACHINE := sparc
# We are adding -Wa,-Av8 to KBUILD_CFLAGS to deal with a specs bug in some
# versions of gcc. Some gcc versions won't pass -Av8 to binutils when you
# give -mcpu=v8. This silently worked with older binutils versions but
# does not any more.
KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
KBUILD_CFLAGS += -Wa,-Av8
KBUILD_AFLAGS += -m32 -Wa,-Av8
else


@ -422,8 +422,9 @@
#define __NR_listen 354
#define __NR_setsockopt 355
#define __NR_mlock2 356
+#define __NR_copy_file_range 357
-#define NR_syscalls 357
+#define NR_syscalls 358
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001


@ -948,7 +948,24 @@ linux_syscall_trace:
cmp %o0, 0
bne 3f
mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ld [%sp + STACKFRAME_SZ + PT_G1], %g1
sethi %hi(sys_call_table), %l7
ld [%sp + STACKFRAME_SZ + PT_I0], %i0
or %l7, %lo(sys_call_table), %l7
ld [%sp + STACKFRAME_SZ + PT_I1], %i1
ld [%sp + STACKFRAME_SZ + PT_I2], %i2
ld [%sp + STACKFRAME_SZ + PT_I3], %i3
ld [%sp + STACKFRAME_SZ + PT_I4], %i4
ld [%sp + STACKFRAME_SZ + PT_I5], %i5
cmp %g1, NR_syscalls
bgeu 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
mov %i0, %o0
ld [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3


@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
mov %o1, %o4
mov HV_FAST_MACH_SET_WATCHDOG, %o5
ta HV_FAST_TRAP
+brnz,a,pn %o4, 0f
stx %o1, [%o4]
-retl
+0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)


@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
unsigned char fenab;
int err;
-flush_user_windows();
+synchronize_user_stack();
if (get_thread_wsaved() ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok(ucp, sizeof(*ucp))))


@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
EXPORT_SYMBOL(sun4v_mach_set_watchdog);
/* from hweight.S */
EXPORT_SYMBOL(__arch_hweight8);


@ -158,7 +158,25 @@ linux_syscall_trace32:
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
sethi %hi(sys_call_table32), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
or %l7, %lo(sys_call_table32), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
cmp %g1, NR_syscalls
bgeu,pn %xcc, 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
srl %i0, 0, %o0
lduw [%l7 + %l4], %l7
srl %i4, 0, %o4
srl %i1, 0, %o1
srl %i2, 0, %o2
@ -170,7 +188,25 @@ linux_syscall_trace:
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
sethi %hi(sys_call_table64), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
or %l7, %lo(sys_call_table64), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
cmp %g1, NR_syscalls
bgeu,pn %xcc, 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
mov %i0, %o0
lduw [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3


@ -88,4 +88,4 @@ sys_call_table:
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/ .long sys_setsockopt, sys_mlock2
+/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range


@ -89,7 +89,7 @@ sys_call_table32:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-.word compat_sys_setsockopt, sys_mlock2
+.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
#endif /* CONFIG_COMPAT */
@ -170,4 +170,4 @@ sys_call_table:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-.word sys_setsockopt, sys_mlock2
+.word sys_setsockopt, sys_mlock2, sys_copy_file_range


@ -12,6 +12,7 @@
#include <skas.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
static void kill_off_processes(void)
{


@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig;
int handled_sig = 0;
-if (get_signal(&ksig)) {
+while (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);


@ -16,6 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/realmode.h>
#include <linux/ftrace.h>
#include "../../realmode/rm/wakeup.h"
#include "sleep.h"
@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */
/*
* Pause/unpause graph tracing around do_suspend_lowlevel as it has
* inconsistent call/return info after it jumps to the wakeup vector.
*/
pause_graph_tracing();
do_suspend_lowlevel();
unpause_graph_tracing();
return 0;
}


@ -596,6 +596,8 @@ struct vcpu_vmx {
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
u64 current_tsc_ratio;
};
enum segment_cache_field {
@ -2127,14 +2129,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-/* Setup TSC multiplier */
-if (cpu_has_vmx_tsc_scaling())
-vmcs_write64(TSC_MULTIPLIER,
-vcpu->arch.tsc_scaling_ratio);
vmx->loaded_vmcs->cpu = cpu;
}
+/* Setup TSC multiplier */
+if (kvm_has_tsc_control &&
+vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
+vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
vmx_vcpu_pi_load(vcpu, cpu);
}


@ -2752,7 +2752,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
-vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@ -6619,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* KVM_DEBUGREG_WONT_EXIT again.
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
-int i;
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
kvm_x86_ops->sync_dirty_debug_regs(vcpu);
-for (i = 0; i < KVM_NR_DB_REGS; i++)
-vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+kvm_update_dr0123(vcpu);
kvm_update_dr6(vcpu);
kvm_update_dr7(vcpu);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
/*


@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
exit(1);
}
printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT);
printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
printf("Locating the top of the address space ... ");
fflush(stdout);
@ -134,7 +134,7 @@ unsigned long os_get_top_address(void)
exit(1);
}
top <<= UM_KERN_PAGE_SHIFT;
printf("0x%x\n", top);
printf("0x%lx\n", top);
return top;
}
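
Both printf changes are the same one-character bug class: bottom and top are unsigned long, and on 64-bit UML %x consumes only an int, so the printed address was truncated and gcc's -Wformat complains. A minimal reproduction:

#include <stdio.h>

int main(void)
{
	unsigned long top = 0x123456789aUL;	/* wider than 32 bits on LP64 */

	printf("0x%lx\n", top);	/* correct: prints 0x123456789a */
	/* printf("0x%x\n", top) would truncate and trigger -Wformat */
	return 0;
}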


@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
static int __blk_rq_map_user_iov(struct request *rq,
struct rq_map_data *map_data, struct iov_iter *iter,
gfp_t gfp_mask, bool copy)
{
struct request_queue *q = rq->q;
struct bio *bio, *orig_bio;
int ret;
if (copy)
bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
else
bio = bio_map_user_iov(q, iter, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (map_data && map_data->null_mapped)
bio_set_flag(bio, BIO_NULL_MAPPED);
iov_iter_advance(iter, bio->bi_iter.bi_size);
if (map_data)
map_data->offset += bio->bi_iter.bi_size;
orig_bio = bio;
blk_queue_bounce(q, &bio);
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);
ret = blk_rq_append_bio(q, rq, bio);
if (ret) {
bio_endio(bio);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
}
return 0;
}
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
-struct bio *bio;
-int unaligned = 0;
-struct iov_iter i;
struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
+bool copy = (q->dma_pad_mask & iter->count) || map_data;
+struct bio *bio = NULL;
+struct iov_iter i;
+int ret;
if (!iter || !iter->count)
return -EINVAL;
@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
*/
if ((uaddr & queue_dma_alignment(q)) ||
iovec_gap_to_prv(q, &prv, &iov))
-unaligned = 1;
+copy = true;
prv.iov_base = iov.iov_base;
prv.iov_len = iov.iov_len;
}
-if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
-bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
-else
-bio = bio_map_user_iov(q, iter, gfp_mask);
-if (IS_ERR(bio))
-return PTR_ERR(bio);
-if (map_data && map_data->null_mapped)
-bio_set_flag(bio, BIO_NULL_MAPPED);
-if (bio->bi_iter.bi_size != iter->count) {
-/*
-* Grab an extra reference to this bio, as bio_unmap_user()
-* expects to be able to drop it twice as it happens on the
-* normal IO completion path
-*/
-bio_get(bio);
-bio_endio(bio);
-__blk_rq_unmap_user(bio);
-return -EINVAL;
-}
+i = *iter;
+do {
+ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+if (ret)
+goto unmap_rq;
+if (!bio)
+bio = rq->bio;
+} while (iov_iter_count(&i));
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
-blk_queue_bounce(q, &bio);
-bio_get(bio);
-blk_rq_bio_prep(q, rq, bio);
return 0;
+unmap_rq:
+__blk_rq_unmap_user(bio);
+rq->bio = NULL;
+return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);


@ -304,7 +304,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
struct bio_vec end_bv = { NULL }, nxt_bv;
-struct bvec_iter iter;
if (!blk_queue_cluster(q))
return 0;
@ -316,11 +315,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!bio_has_data(bio))
return 1;
-bio_for_each_segment(end_bv, bio, iter)
-if (end_bv.bv_len == iter.bi_size)
-break;
-nxt_bv = bio_iovec(nxt);
+bio_get_last_bvec(bio, &end_bv);
+bio_get_first_bvec(nxt, &nxt_bv);
if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
return 0;


@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
start = ndr_desc->res->start;
len = ndr_desc->res->end - ndr_desc->res->start + 1;
+/*
+* If ARS is unimplemented, unsupported, or if the 'Persistent Memory
+* Scrub' flag in extended status is not set, skip this but continue
+* initialization
+*/
rc = ars_get_cap(nd_desc, ars_cap, start, len);
+if (rc == -ENOTTY) {
+dev_dbg(acpi_desc->dev,
+"Address Range Scrub is not implemented, won't create an error list\n");
+rc = 0;
+goto out;
+}
if (rc)
goto out;
-/*
-* If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
-* extended status is not set, skip this but continue initialization
-*/
if ((ars_cap->status & 0xffff) ||
!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
dev_warn(acpi_desc->dev,


@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@ -1325,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
#ifdef CONFIG_ARM64
/*
* Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
* Workaround is to make sure all pending IRQs are served before leaving
* handler.
*/
static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
unsigned int handled = 1;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
do {
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
rc = ahci_handle_port_intr(host, irq_masked);
if (!rc)
handled = 0;
writel(irq_stat, mmio + HOST_IRQ_STAT);
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
#endif
/*
* ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
* to single msi.
@ -1560,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
#ifdef CONFIG_ARM64
if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
hpriv->irq_handler = ahci_thunderx_irq_handler;
#endif
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);


@ -240,8 +240,7 @@ enum {
error-handling stage) */
AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
-AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as
-Edge Triggered */
#ifdef CONFIG_PCI_MSI
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@ -361,6 +360,7 @@ struct ahci_host_priv {
* be overridden anytime before the host is activated.
*/
void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
};
#ifdef CONFIG_PCI_MSI
@ -424,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
void ahci_print_info(struct ata_host *host, const char *scc_s);
int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
void ahci_error_handler(struct ata_port *ap);
u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)


@ -548,6 +548,88 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
return rc;
}
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
* @ata_host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
* For hardware with broken edge trigger latch
* the HOST_IRQ_STAT register misses the edge interrupt
* when clearing of HOST_IRQ_STAT register and hardware
* reporting the PORT_IRQ_STAT register at the
* same clock cycle.
* As such, the algorithm below outlines the workaround.
*
* 1. Read HOST_IRQ_STAT register and save the state.
* 2. Clear the HOST_IRQ_STAT register.
* 3. Read back the HOST_IRQ_STAT register.
* 4. If HOST_IRQ_STAT register equals to zero, then
* traverse the rest of port's PORT_IRQ_STAT register
* to check if an interrupt is triggered at that point else
* go to step 6.
* 5. If PORT_IRQ_STAT register of rest ports is not equal to zero
* then update the state of HOST_IRQ_STAT saved in step 1.
* 6. Handle port interrupts.
* 7. Exit
*/
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
u32 irq_masked)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *port_mmio;
int i;
if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
for (i = 0; i < host->n_ports; i++) {
if (irq_masked & (1 << i))
continue;
port_mmio = ahci_port_base(host->ports[i]);
if (readl(port_mmio + PORT_IRQ_STAT))
irq_masked |= (1 << i);
}
}
return ahci_handle_port_intr(host, irq_masked);
}
static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
/*
* HOST_IRQ_STAT behaves as edge triggered latch meaning that
* it should be cleared before all the port events are cleared.
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
@ -779,7 +861,8 @@ static int xgene_ahci_probe(struct platform_device *pdev)
hpriv->flags = AHCI_HFLAG_NO_NCQ;
break;
case XGENE_AHCI_V2:
-hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+hpriv->flags |= AHCI_HFLAG_YES_FBS;
+hpriv->irq_handler = xgene_ahci_irq_intr;
break;
default:
break;


@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
const char *buf, size_t size);
static ssize_t ahci_show_em_supported(struct device *dev,
struct device_attribute *attr, char *buf);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine;
if (!hpriv->irq_handler)
hpriv->irq_handler = ahci_single_level_irq_intr;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
@ -1164,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
-if ((tmp & PORT_CMD_HPCP) ||
-((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
@ -1846,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
{
unsigned int i, handled = 0;
@ -1872,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
return handled;
}
-static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
-{
-struct ata_host *host = dev_instance;
-struct ahci_host_priv *hpriv;
-unsigned int rc = 0;
-void __iomem *mmio;
-u32 irq_stat, irq_masked;
-VPRINTK("ENTER\n");
-hpriv = host->private_data;
-mmio = hpriv->mmio;
-/* sigh. 0xffffffff is a valid return from h/w */
-irq_stat = readl(mmio + HOST_IRQ_STAT);
-if (!irq_stat)
-return IRQ_NONE;
-irq_masked = irq_stat & hpriv->port_map;
-spin_lock(&host->lock);
-/*
-* HOST_IRQ_STAT behaves as edge triggered latch meaning that
-* it should be cleared before all the port events are cleared.
-*/
-writel(irq_stat, mmio + HOST_IRQ_STAT);
-rc = ahci_handle_port_intr(host, irq_masked);
-spin_unlock(&host->lock);
-VPRINTK("EXIT\n");
-return IRQ_RETVAL(rc);
-}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{
@ -2535,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
-if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX))
+if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+if (hpriv->irq_handler)
+dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
+and custom irq handler implemented\n");
rc = ahci_host_activate_multi_irqs(host, sht);
-else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ)
-rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr,
-IRQF_SHARED, sht);
-else
-rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
+} else {
+rc = ata_host_activate(host, irq, hpriv->irq_handler,
IRQF_SHARED, sht);
+}
return rc;
}
EXPORT_SYMBOL_GPL(ahci_host_activate);


@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg)
{
-int val = -EINVAL, rc = -EINVAL;
+unsigned long val;
+int rc = -EINVAL;
unsigned long flags;
switch (cmd) {
case ATA_IOC_GET_IO32:
case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
-if (copy_to_user(arg, &val, 1))
-return -EFAULT;
-return 0;
+return put_user(val, (unsigned long __user *)arg);
case ATA_IOC_SET_IO32:
case HDIO_SET_32BIT:
val = (unsigned long) arg;
rc = 0;
spin_lock_irqsave(ap->lock, flags);
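
The old HDIO_GET_32BIT path copied a single byte of the value into the user buffer -- too small for the long that the hdio ABI documents, and endian-dependent as well, since on big-endian machines the first byte of an integer is its most significant byte. put_user() instead sizes the store from the pointer's type. Sketch of the fixed shape (helper name invented, kernel context assumed):

/* Sketch only. put_user(x, ptr) stores sizeof(*ptr) bytes, so the whole
 * unsigned long reaches userspace regardless of byte order. */
static int report_io32_setting(unsigned long val, void __user *arg)
{
	return put_user(val, (unsigned long __user *)arg);
}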


@ -32,6 +32,8 @@
#include <linux/libata.h>
#include <scsi/scsi_host.h>
#include <asm/mach-rc32434/rb.h>
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
int gpio;
struct resource *res;
struct ata_host *ah;
struct cf_device *pdata;
struct rb532_cf_info *info;
int ret;
@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT;
}
-gpio = irq_to_gpio(irq);
+pdata = dev_get_platdata(&pdev->dev);
+if (!pdata) {
+dev_err(&pdev->dev, "no platform data specified\n");
+return -EINVAL;
+}
+gpio = pdata->gpio_pin;
if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT;


@ -296,6 +296,7 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs


@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
SoCs.
config ARM_MT8173_CPUFREQ
bool "Mediatek MT8173 CPUFreq support"
tristate "Mediatek MT8173 CPUFreq support"
depends on ARCH_MEDIATEK && REGULATOR
depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
-depends on !CPU_THERMAL || THERMAL=y
+depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUFreq driver support for Mediatek MT8173 SoC.


@ -17,6 +17,7 @@
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>


@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN;
if (sw_desc->cyclic)
sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
static bool is_desc_completed(struct virt_dma_desc *vd)
@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd));
if (to_pxad_sw_desc(vd)->cyclic) {
vchan_cyclic_callback(vd);
break;
}
if (is_desc_completed(vd)) {
list_del(&vd->node);
vchan_cookie_complete(vd);
@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
-dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags);
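
PXA_DCMD_LENGTH is a field mask, so the period length has to be ANDed into it; OR-ing merged the entire mask into dcmd and programmed a maximal length for every cyclic descriptor. A toy check of the two operators (mask value invented):

#include <assert.h>
#include <stdint.h>

#define LEN_MASK	0x1fffu	/* illustrative length-field mask */

int main(void)
{
	uint32_t period_len = 0x40;

	assert((LEN_MASK & period_len) == 0x40);	/* encodes the value */
	assert((LEN_MASK | period_len) == LEN_MASK);	/* sets every field bit */
	return 0;
}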


@ -1574,7 +1574,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
-&& participants[channel]) {
+&& !participants[channel]) {
participant_count++;
participants[channel] = 1;
break;


@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
static void gpio_rcar_irq_bus_lock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
pm_runtime_get_sync(&p->pdev->dev);
}
static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_irq_request_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
int error;
error = pm_runtime_get_sync(&p->pdev->dev);
if (error < 0)
return error;
return 0;
}
static void gpio_rcar_irq_release_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
pm_runtime_put(&p->pdev->dev);
}
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);


@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we
* have the dpcd */
-if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* set it to OFF so that drm_helper_connector_dpms()


@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
-while (amdgpuCrtc->enabled && repcnt--) {
+while (amdgpuCrtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@ -112,13 +112,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
if (min_udelay > vblank->framedur_ns / 2000) {
/* Don't wait ridiculously long - something is wrong */
repcnt = 0;
break;
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};


@ -649,9 +649,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
-adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@ -670,6 +667,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
+adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;


@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
-if (adev->pp_enabled)
+if (adev->pp_enabled) {
amdgpu_pm_sysfs_init(adev);
+amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+}
#endif
return ret;
}


@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
-for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ENCODER_OBJECT_ID_NUTMEG) {
+for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+max_pix_clock = (lane_num * 270000 * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
-*dp_rate = link_rates[i];
+*dp_rate = 270000;
return 0;
}
}
+} else {
+for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+if (max_pix_clock >= pix_clock) {
+*dp_lanes = lane_num;
+*dp_rate = link_rates[i];
+return 0;
+}
+}
+}
}
return -EINVAL;
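
Both branches (and the twin radeon hunk below) keep the same bandwidth test: a link with lane_num lanes at link_rate can carry (lane_num * link_rate * 8) / bpp pixels per unit time, and the search takes the first pair that covers the requested pixel clock; NUTMEG DP-to-VGA bridges are simply pinned to the 270000 entry (the 2.7 Gbps HBR rate). Worked numbers, illustrative only:

#include <stdio.h>

int main(void)
{
	/* 1080p60: 148500 kHz pixel clock at 24 bits per pixel. */
	unsigned lane_num = 4, link_rate = 270000, bpp = 24;
	unsigned pix_clock = 148500;

	unsigned max_pix_clock = (lane_num * link_rate * 8) / bpp;

	printf("4 lanes @ HBR carry up to %u kHz: %s\n", max_pix_clock,
	       max_pix_clock >= pix_clock ? "enough" : "not enough");
	return 0;
}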


@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
AMD_PG_STATE_GATE);
cz_enable_vce_dpm(adev, false);
-/* TODO: to figure out why vce can't be poweroff. */
-/* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
pi->vce_power_gated = true;
} else {
cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
-cz_enable_vce_dpm(adev, true);
+cz_enable_vce_dpm(adev, !gate);
}
-return;
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {


@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));


@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-WAIT_REG_MEM_FUNCTION(3))); /* equal */
+WAIT_REG_MEM_FUNCTION(3) | /* equal */
+WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);


@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
data.requested_ui_label = power_state_convert(ps);
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
}
break;
case AMD_PP_EVENT_COMPLETE_INIT:
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
default:
break;
}


@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
};
static const pem_event_action *complete_init_event[] = {
unblock_adjust_power_state_tasks,
adjust_power_state_tasks,
enable_gfx_clock_gating_tasks,
enable_gfx_voltage_island_power_gating_tasks,


@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
} else {
cz_dpm_update_vce_dpm(hwmgr);
-cz_enable_disable_vce_dpm(hwmgr, true);
+cz_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
}


@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
-if (data & 0x400)
+if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;


@ -1382,8 +1382,16 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
{
if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
return drm_atomic_helper_connector_dpms(connector, mode);
else
return drm_helper_connector_dpms(connector, mode);
}
static const struct drm_connector_funcs tda998x_connector_funcs = {
-.dpms = drm_atomic_helper_connector_dpms,
+.dpms = tda998x_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,


@ -2303,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
*/
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
/*
* Even if power well support was disabled we still want to disable
* power wells while we are system suspended.
*/
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
}
/**
@ -2349,22 +2349,20 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
-int ret;
-if (!IS_ENABLED(CONFIG_PM))
-return true;
+if (IS_ENABLED(CONFIG_PM)) {
+int ret = pm_runtime_get_if_in_use(device);
-ret = pm_runtime_get_if_in_use(device);
-/*
-* In cases runtime PM is disabled by the RPM core and we get an
-* -EINVAL return value we are not supposed to call this function,
-* since the power state is undefined. This applies atm to the
-* late/early system suspend/resume handlers.
-*/
-WARN_ON_ONCE(ret < 0);
-if (ret <= 0)
-return false;
+/*
+* In cases runtime PM is disabled by the RPM core and we get
+* an -EINVAL return value we are not supposed to call this
+* function, since the power state is undefined. This applies
+* atm to the late/early system suspend/resume handlers.
+*/
+WARN_ON_ONCE(ret < 0);
+if (ret <= 0)
+return false;
+}
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
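
The restructuring hinges on pm_runtime_get_if_in_use() being tri-state: it returns 1 (taking a reference) when the device is runtime-active, 0 when it is not, and -EINVAL when runtime PM is disabled. An annotated fragment of that contract as the hunk uses it (kernel context assumed, not standalone):

int ret = pm_runtime_get_if_in_use(device);

WARN_ON_ONCE(ret < 0);	/* -EINVAL: called while runtime PM is disabled */
if (ret <= 0)
	return false;	/* 0: device suspended, no wakeref was taken */
/* ret == 1: reference held, hardware may be touched */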


@ -64,6 +64,7 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
/* Start DC channel and DI after IDMAC */
ipu_dc_enable_channel(ipu_crtc->dc);
ipu_di_enable(ipu_crtc->di);
drm_crtc_vblank_on(&ipu_crtc->base);
ipu_crtc->enabled = 1;
}
@ -80,6 +81,7 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
ipu_di_disable(ipu_crtc->di);
ipu_plane_disable(ipu_crtc->plane[0]);
ipu_dc_disable(ipu);
drm_crtc_vblank_off(&ipu_crtc->base);
ipu_crtc->enabled = 0;
}


@ -42,6 +42,7 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
DRM_FORMAT_RGB565,
};
int ipu_plane_irq(struct ipu_plane *ipu_plane)


@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
-for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ENCODER_OBJECT_ID_NUTMEG) {
+for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+max_pix_clock = (lane_num * 270000 * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
-*dp_rate = link_rates[i];
+*dp_rate = 270000;
return 0;
}
}
+} else {
+for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+if (max_pix_clock >= pix_clock) {
+*dp_lanes = lane_num;
+*dp_rate = link_rates[i];
+return 0;
+}
+}
+}
}
return -EINVAL;


@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
drm_helper_hpd_irq_event(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)


@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
-while (radeon_crtc->enabled && repcnt--) {
+while (radeon_crtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
if (min_udelay > vblank->framedur_ns / 2000) {
/* Don't wait ridiculously long - something is wrong */
repcnt = 0;
break;
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};


@ -1079,10 +1079,8 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
-rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
-rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
-rdev->pm.dpm.single_display = single_display;
-/* update displays */
-radeon_dpm_display_configuration_changed(rdev);
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@ -1099,8 +1097,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
+/* update displays */
+radeon_dpm_display_configuration_changed(rdev);
+rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+rdev->pm.dpm.single_display = single_display;
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {


@ -563,6 +563,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
};


@ -18,6 +18,7 @@
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include "bus.h"
#include "dev.h"
@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;


@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
.nb_bases = 8,
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x02_info = {
@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
.nb_bases = 12,
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x04_info = {
@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
.nb_bases = 64,
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
};
static const struct host1x_info host1x05_info = {
@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
.nb_bases = 64,
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
};
static struct of_device_id host1x_of_match[] = {
@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
if (host->info->init) {
err = host->info->init(host);
if (err)


@ -96,6 +96,7 @@ struct host1x_info {
int nb_mlocks; /* host1x: number of mlocks */
int (*init)(struct host1x *); /* initialize per SoC ops */
int sync_offset;
u64 dma_mask; /* mask of addressable memory */
};
struct host1x {


@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
+struct device_node *of_node;
+/* Associate subdevice with the corresponding port node */
+of_node = of_graph_get_port_by_id(dev->of_node, i);
+if (!of_node) {
+dev_info(dev,
+"no port@%d node in %s, not using %s%d\n",
+i, dev->of_node->full_name,
+(i / 2) ? "DI" : "CSI", i % 2);
+continue;
+}
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
goto err_register;
}
+pdev->dev.of_node = of_node;
pdev->dev.parent = dev;
-/* Associate subdevice with the corresponding port node */
-pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
-if (!pdev->dev.of_node) {
-dev_err(dev, "missing port@%d node in %s\n", i,
-dev->of_node->full_name);
-ret = -ENODEV;
-goto err_register;
-}
ret = platform_device_add_data(pdev, &reg->pdata,
sizeof(reg->pdata));
if (!ret)
@ -1289,10 +1292,6 @@ static int ipu_probe(struct platform_device *pdev)
ipu->irq_sync = irq_sync;
ipu->irq_err = irq_err;
-ret = ipu_irq_init(ipu);
-if (ret)
-goto out_failed_irq;
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to reset: %d\n", ret);
@ -1302,6 +1301,10 @@ static int ipu_probe(struct platform_device *pdev)
if (ret)
goto out_failed_reset;
+ret = ipu_irq_init(ipu);
+if (ret)
+goto out_failed_irq;
/* Set MCU_T to divide MCU access window into 2 */
ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
IPU_DISP_GEN);
@ -1324,9 +1327,9 @@ static int ipu_probe(struct platform_device *pdev)
failed_add_clients:
ipu_submodules_exit(ipu);
failed_submodules_init:
-out_failed_reset:
ipu_irq_exit(ipu);
out_failed_irq:
+out_failed_reset:
clk_disable_unprepare(ipu->clk);
return ret;
}


@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
-dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
-GFP_KERNEL);
+dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
if (!dev->bsc_regmap)
return -ENOMEM;
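
The bug is in the sizeof: sizeof(struct bsc_regs *) is the size of a pointer, 4 or 8 bytes, not of the register block being allocated. Sizing the allocation from the dereferenced pointer is the usual kernel idiom because it stays correct even if the pointer's type changes later. Sketch with an invented struct, kernel context assumed:

struct example_regs {
	u32 ctl;
	u32 data[32];
};

static struct example_regs *alloc_regs(struct device *dev)
{
	/* sizeof(*regs) is sizeof(struct example_regs), never pointer size */
	struct example_regs *regs = devm_kzalloc(dev, sizeof(*regs),
						 GFP_KERNEL);

	return regs;	/* NULL on allocation failure */
}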


@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
printk(KERN_WARNING "Couldn't query the device attributes\n");
ib_cache_cleanup_one(device);
goto out;
}


@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
}
}
-if (rec->hop_limit > 1 || use_roce) {
+if (rec->hop_limit > 0 || use_roce) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.dgid = rec->dgid;


@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
resp_size);
INIT_UDATA(&uhw, buf + sizeof(cmd),
(unsigned long)cmd.response + resp_size,
in_len - sizeof(cmd), out_len - resp_size);
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - resp_size);
memset(&cmd_ex, 0, sizeof(cmd_ex));
cmd_ex.user_handle = cmd.user_handle;
@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
if (ret)
@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
if (ret)

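All three uverbs hunks subtract sizeof(struct ib_uverbs_cmd_hdr) from in_len so the vendor-specific tail handed to the driver is measured past both the header and the fixed command struct. A worked example of the arithmetic, with illustrative sizes rather than the real ABI values:

#include <stdio.h>

int main(void)
{
	unsigned long in_len  = 96;	/* total bytes handed to the handler */
	unsigned long hdr_len = 12;	/* sizeof(struct ib_uverbs_cmd_hdr), illustrative */
	unsigned long cmd_len = 64;	/* sizeof(cmd), illustrative */

	printf("without header subtracted: %lu\n", in_len - cmd_len);		/* 32 */
	printf("with header subtracted:    %lu\n", in_len - cmd_len - hdr_len);	/* 20 */
	return 0;
}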
View File

@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
struct ib_udata *udata, int buf_size, int *inlen)
struct ib_udata *udata, int buf_size, int *inlen,
int is_xrc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
int ncont;
u32 offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
if (drv_data < 0)
return -EINVAL;
ucmdlen = (drv_data < sizeof(ucmd)) ?
drv_data : sizeof(ucmd);
ucmdlen = min(udata->inlen, sizeof(ucmd));
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_dbg(dev, "failed copy udata\n");
@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
if (drv_data > sizeof(ucmd) &&
if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
drv_data - sizeof(ucmd)))
udata->inlen - sizeof(ucmd)))
return -EINVAL;
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
if (is_xrc) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
}
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@ -170,7 +169,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
int *inlen)
int *inlen, int is_xrc)
{
int err;
int i;
@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
is_xrc);
else
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
is_xrc);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
in->ctx.state_log_sz = ilog2(srq->msrq.max);
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;

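With the header no longer counted in udata->inlen (see the uverbs hunks above), the mlx5 open-coded clamp collapses to min(), which bounds the copy in both directions: a short user buffer is never over-read and a long one never overflows ucmd. A standalone sketch of the bounded copy, with an illustrative struct (the kernel's min() additionally type-checks its arguments):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct ucmd_like { unsigned int flags, reserved; };	/* stand-in */

int main(void)
{
	unsigned char user_buf[3] = { 1, 2, 3 };	/* shorter than the struct */
	struct ucmd_like ucmd = { 0, 0 };
	size_t n = MIN(sizeof(user_buf), sizeof(ucmd));

	memcpy(&ucmd, user_buf, n);	/* never over-reads or over-writes */
	printf("copied %zu of %zu bytes\n", n, sizeof(ucmd));
	return 0;
}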
View File

@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
/*
* For dynamic growth the aperture size is split into ranges of 128MB of
@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
if (!dev_data)
return;
if (dev_data->domain)
detach_device(dev);
iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
dev);

View File

@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
u8 bank, u8 cntr, u8 fxn,
u64 *value, bool is_write);
static inline void update_last_devid(u16 devid)
{
if (devid > amd_iommu_last_bdf)
@ -1015,6 +1019,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
/*
* Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
* Workaround:
* BIOS should enable ATS write permission check by setting
* L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
*/
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
u32 value;
if ((boot_cpu_data.x86 != 0x15) ||
(boot_cpu_data.x86_model < 0x30) ||
(boot_cpu_data.x86_model > 0x3f))
return;
/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
value = iommu_read_l2(iommu, 0x47);
if (value & BIT(0))
return;
/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
iommu_write_l2(iommu, 0x47, value | BIT(0));
pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
dev_name(&iommu->dev->dev));
}
/*
* This function clues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* Check if the performance counters can be written to */
if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
(0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
(0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
}
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
amd_iommu_groups, "ivhd%d",
@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
u8 bank, u8 cntr, u8 fxn,
u64 *value, bool is_write)
{
struct amd_iommu *iommu;
u32 offset;
u32 max_offset_lim;
/* Make sure the IOMMU PC resource is available */
if (!amd_iommu_pc_present)
return -ENODEV;
/* Locate the iommu associated with the device ID */
iommu = amd_iommu_rlookup_table[devid];
/* Check for valid iommu and pc register indexing */
if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
if (WARN_ON((fxn > 0x28) || (fxn & 7)))
return -ENODEV;
offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
u64 *value, bool is_write)
{
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
/* Make sure the IOMMU PC resource is available */
if (!amd_iommu_pc_present || iommu == NULL)
return -ENODEV;
return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
value, is_write);
}

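The refactor above splits the exported, devid-keyed amd_iommu_pc_get_set_reg_val() into a thin lookup wrapper plus an internal helper that takes the struct amd_iommu * directly, so init_iommu_perf_ctr() can probe the counter registers of the exact IOMMU it is initializing instead of going through the rlookup table. A generic sketch of that split, with illustrative names and types:

#include <stdio.h>

struct iommu_like { int reg; };

static struct iommu_like lookup_table[4];	/* stands in for amd_iommu_rlookup_table */

/* internal: for callers that already hold the object */
static int helper_set_reg(struct iommu_like *iommu, int value)
{
	iommu->reg = value;
	return 0;
}

/* exported: keeps the old id-keyed signature, resolves the object once */
static int public_set_reg(unsigned int devid, int value)
{
	if (devid >= 4)
		return -19;	/* stands in for -ENODEV */
	return helper_set_reg(&lookup_table[devid], value);
}

int main(void)
{
	printf("ret = %d\n", public_set_reg(1, 42));
	return 0;
}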
View File

@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
/* Only care about add/remove events for physical functions */
if (pdev->is_virtfn)
return NOTIFY_DONE;
if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
if (action != BUS_NOTIFY_ADD_DEVICE &&
action != BUS_NOTIFY_REMOVED_DEVICE)
return NOTIFY_DONE;
info = dmar_alloc_pci_notify_info(pdev, action);
@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
down_write(&dmar_global_lock);
if (action == BUS_NOTIFY_ADD_DEVICE)
dmar_pci_bus_add_dev(info);
else if (action == BUS_NOTIFY_DEL_DEVICE)
else if (action == BUS_NOTIFY_REMOVED_DEVICE)
dmar_pci_bus_del_dev(info);
up_write(&dmar_global_lock);

View File

@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
rmrru->devices_cnt);
if(ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
rmrru->devices, rmrru->devices_cnt);
}
@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
else if(ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
atsru->devices, atsru->devices_cnt))
break;

View File

@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client,
if (!client->dev.platform_data) {
dev_err(&client->dev,
"Neither DT not platform data provided\n");
return EINVAL;
return -EINVAL;
}
flash->platform_data = client->dev.platform_data;
}

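The adp1653 hunk fixes a sign error: a bare EINVAL is positive, so callers that test ret < 0 would miss the failure; kernel convention is to return negative errno values. A tiny demonstration of the difference:

#include <stdio.h>
#include <errno.h>

int main(void)
{
	int bad  = EINVAL;	/* positive 22: ret < 0 checks miss it */
	int good = -EINVAL;	/* negative errno, the kernel convention */

	printf("bad  treated as error? %s\n", bad  < 0 ? "yes" : "no");	/* no */
	printf("good treated as error? %s\n", good < 0 ? "yes" : "no");	/* yes */
	return 0;
}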
View File

@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
}
/* tx 5v detect */
tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
tx_5v = irq_reg_0x70 & info->cable_det_mask;
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
io_write(sd, 0x71, tx_5v);
adv76xx_s_detect_tx_5v_ctrl(sd);
if (handled)
*handled = true;

View File

@ -1843,8 +1843,7 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
ent->function = MEDIA_ENT_F_CONN_RF;
break;
default: /* AU0828_VMUX_DEBUG */
ent->function = MEDIA_ENT_F_CONN_TEST;
break;
continue;
}
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);

View File

@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
delta = mftb() - psl_tb;
if (delta < 0)
delta = -delta;
} while (cputime_to_usecs(delta) > 16);
} while (tb_to_ns(delta) > 16000);
return 0;
}

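The cxl hunk fixes a unit mismatch: delta is a timebase-tick count, not cputime, so it is now converted with tb_to_ns() and compared against 16000 ns (16 us). A naive stand-in for the conversion, assuming a known timebase frequency; the in-kernel helper is arch-specific and the 512 MHz value here is purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* naive conversion; assumes the frequency is known and delta is small */
static uint64_t tb_to_ns_sketch(uint64_t ticks, uint64_t tb_freq_hz)
{
	return ticks * NSEC_PER_SEC / tb_freq_hz;
}

int main(void)
{
	uint64_t delta = 10;	/* illustrative timebase delta */
	uint64_t ns = tb_to_ns_sketch(delta, 512000000ULL);	/* 512 MHz assumed */

	printf("%llu ticks = %llu ns; retry? %s\n",
	       (unsigned long long)delta, (unsigned long long)ns,
	       ns > 16000 ? "yes" : "no");
	return 0;
}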
View File

@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
vol->upd_buf = vmalloc(req->bytes);
vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
if (!vol->upd_buf)
return -ENOMEM;

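The ubi hunk rounds the update buffer up to min_io_size so the last, partially filled chunk can be padded and written as a whole page. ALIGN() is the usual power-of-two round-up; a standalone demonstration with illustrative sizes, using a simplified form of the kernel macro:

#include <stdio.h>

/* simplified kernel ALIGN(); alignment must be a power of two */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	long req_bytes   = 5000;	/* illustrative update size */
	long min_io_size = 4096;	/* illustrative NAND page size */

	printf("ALIGN(%ld, %ld) = %ld\n",
	       req_bytes, min_io_size, ALIGN(req_bytes, min_io_size));	/* 8192 */
	return 0;
}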
View File

@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (clear_intf)
mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
if (eflag)
if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
mcp251x_write_bits(spi, EFLG, eflag, 0x00);
/* Update can state */

View File

@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
static void gs_destroy_candev(struct gs_can *dev)
{
unregister_candev(dev->netdev);
free_candev(dev->netdev);
usb_kill_anchored_urbs(&dev->tx_submitted);
kfree(dev);
free_candev(dev->netdev);
}
static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
/* on failure destroy previously created candevs */
icount = i;
for (i = 0; i < icount; i++) {
for (i = 0; i < icount; i++)
gs_destroy_candev(dev->canch[i]);
dev->canch[i] = NULL;
}
usb_kill_anchored_urbs(&dev->rx_submitted);
kfree(dev);
return rc;
}
@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
for (i = 0; i < GS_MAX_INTF; i++) {
struct gs_can *can = dev->canch[i];
if (!can)
continue;
gs_destroy_candev(can);
}
for (i = 0; i < GS_MAX_INTF; i++)
if (dev->canch[i])
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
kfree(dev);
}
static const struct usb_device_id gs_usb_table[] = {

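Both gs_usb hunks enforce the same rule: anything that can still reference an object, here the anchored URBs, must be stopped before the object is freed, and each allocation is freed exactly once. A generic sketch of the ordering, with hypothetical names standing in for usb_kill_anchored_urbs() and free_candev()/kfree():

#include <stdio.h>
#include <stdlib.h>

struct dev_like { int *buf; };

static void stop_io(struct dev_like *d)	/* stands in for usb_kill_anchored_urbs() */
{
	(void)d;
	puts("async users stopped");
}

static void release(struct dev_like *d)	/* stands in for free_candev()/kfree() */
{
	free(d->buf);
	free(d);
}

int main(void)
{
	struct dev_like *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->buf = malloc(16);

	stop_io(d);	/* 1: nothing can touch d any more */
	release(d);	/* 2: only now is freeing safe */
	return 0;
}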
View File

@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id)
int i;
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
le32_to_cpu(vp->tx_ring[entry].frag[0].length),
le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
PCI_DMA_TODEVICE);
for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)

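In the boomerang descriptor the length word carries control bits in its upper bits, so unmapping with the raw value passes a bogus size; masking with 0xFFF keeps only the fragment length. A small demonstration, with an illustrative flag value:

#include <stdio.h>
#include <stdint.h>

#define LAST_FRAG_FLAG 0x80000000u	/* illustrative control bit */

int main(void)
{
	uint32_t len_word = LAST_FRAG_FLAG | 1500;	/* flags + real length */

	printf("raw length word: 0x%08x\n", len_word);
	printf("dma length:      %u\n", len_word & 0xFFF);	/* 1500 */
	return 0;
}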
View File

@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
priv->mdio->id);
mdiobus_unregister(priv->mdio);
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
priv->mdio = NULL;
}

View File

@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
goto err_disable_clk;
}
priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (of_phy_is_fixed_link(pdev->dev.of_node)) {
ret = of_phy_register_fixed_link(pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "bad fixed-link spec\n");
goto err_free_bus;
}
priv->phy_node = of_node_get(pdev->dev.of_node);
}
if (!priv->phy_node)
priv->phy_node = of_parse_phandle(pdev->dev.of_node,
"phy-handle", 0);
if (!priv->phy_node) {
dev_err(&pdev->dev, "no PHY specified\n");
ret = -ENODEV;

Some files were not shown because too many files have changed in this diff