diff --git a/.clang-format b/.clang-format index 2ffd69afc1a8..196ca317bd1f 100644 --- a/.clang-format +++ b/.clang-format @@ -107,10 +107,13 @@ ForEachMacros: - 'css_for_each_descendant_post' - 'css_for_each_descendant_pre' - 'device_for_each_child_node' + - 'dma_fence_chain_for_each' - 'drm_atomic_crtc_for_each_plane' - 'drm_atomic_crtc_state_for_each_plane' - 'drm_atomic_crtc_state_for_each_plane_state' - 'drm_atomic_for_each_plane_damage' + - 'drm_client_for_each_connector_iter' + - 'drm_client_for_each_modeset' - 'drm_connector_for_each_possible_encoder' - 'drm_for_each_connector_iter' - 'drm_for_each_crtc' @@ -126,6 +129,7 @@ ForEachMacros: - 'drm_mm_for_each_node_in_range' - 'drm_mm_for_each_node_safe' - 'flow_action_for_each' + - 'for_each_active_dev_scope' - 'for_each_active_drhd_unit' - 'for_each_active_iommu' - 'for_each_available_child_of_node' @@ -153,6 +157,8 @@ ForEachMacros: - 'for_each_cpu_not' - 'for_each_cpu_wrap' - 'for_each_dev_addr' + - 'for_each_dev_scope' + - 'for_each_displayid_db' - 'for_each_dma_cap_mask' - 'for_each_dpcm_be' - 'for_each_dpcm_be_rollback' @@ -169,6 +175,8 @@ ForEachMacros: - 'for_each_evictable_lru' - 'for_each_fib6_node_rt_rcu' - 'for_each_fib6_walker_rt' + - 'for_each_free_mem_pfn_range_in_zone' + - 'for_each_free_mem_pfn_range_in_zone_from' - 'for_each_free_mem_range' - 'for_each_free_mem_range_reverse' - 'for_each_func_rsrc' @@ -178,6 +186,7 @@ ForEachMacros: - 'for_each_ip_tunnel_rcu' - 'for_each_irq_nr' - 'for_each_link_codecs' + - 'for_each_link_platforms' - 'for_each_lru' - 'for_each_matching_node' - 'for_each_matching_node_and_match' @@ -302,7 +311,10 @@ ForEachMacros: - 'ide_port_for_each_present_dev' - 'idr_for_each_entry' - 'idr_for_each_entry_continue' + - 'idr_for_each_entry_continue_ul' - 'idr_for_each_entry_ul' + - 'in_dev_for_each_ifa_rcu' + - 'in_dev_for_each_ifa_rtnl' - 'inet_bind_bucket_for_each' - 'inet_lhash2_for_each_icsk_rcu' - 'key_for_each' @@ -343,8 +355,6 @@ ForEachMacros: - 'media_device_for_each_intf' - 'media_device_for_each_link' - 'media_device_for_each_pad' - - 'mp_bvec_for_each_page' - - 'mp_bvec_for_each_segment' - 'nanddev_io_for_each_page' - 'netdev_for_each_lower_dev' - 'netdev_for_each_lower_private' @@ -381,18 +391,19 @@ ForEachMacros: - 'radix_tree_for_each_slot' - 'radix_tree_for_each_tagged' - 'rbtree_postorder_for_each_entry_safe' + - 'rdma_for_each_block' - 'rdma_for_each_port' - 'resource_list_for_each_entry' - 'resource_list_for_each_entry_safe' - 'rhl_for_each_entry_rcu' - 'rhl_for_each_rcu' - 'rht_for_each' - - 'rht_for_each_from' - 'rht_for_each_entry' - 'rht_for_each_entry_from' - 'rht_for_each_entry_rcu' - 'rht_for_each_entry_rcu_from' - 'rht_for_each_entry_safe' + - 'rht_for_each_from' - 'rht_for_each_rcu' - 'rht_for_each_rcu_from' - '__rq_for_each_bio' diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst index d37cbc502936..402636356fbe 100644 --- a/Documentation/process/embargoed-hardware-issues.rst +++ b/Documentation/process/embargoed-hardware-issues.rst @@ -5,7 +5,7 @@ Scope ----- Hardware issues which result in security problems are a different category -of security bugs than pure software bugs which only affect the Linux +of security bugs than pure software bugs which only affect the Linux kernel. Hardware issues like Meltdown, Spectre, L1TF etc. must be treated @@ -159,7 +159,7 @@ Mitigation development The initial response team sets up an encrypted mailing-list or repurposes an existing one if appropriate. 
The disclosing party should provide a list -of contacts for all other parties who have already been, or should be +of contacts for all other parties who have already been, or should be, informed about the issue. The response team contacts these parties so they can name experts who should be subscribed to the mailing-list. @@ -217,11 +217,11 @@ an involved disclosed party. The current ambassadors list: AMD IBM Intel - Qualcomm + Qualcomm Trilok Soni - Microsoft + Microsoft Sasha Levin VMware - XEN + Xen Andrew Cooper Canonical Tyler Hicks Debian Ben Hutchings @@ -230,8 +230,8 @@ an involved disclosed party. The current ambassadors list: SUSE Jiri Kosina Amazon - Google - ============== ======================================================== + Google Kees Cook + ============= ======================================================== If you want your organization to be added to the ambassadors list, please contact the hardware security team. The nominated ambassador has to diff --git a/Documentation/riscv/boot-image-header.txt b/Documentation/riscv/boot-image-header.txt index 1b73fea23b39..14b1492f689b 100644 --- a/Documentation/riscv/boot-image-header.txt +++ b/Documentation/riscv/boot-image-header.txt @@ -18,7 +18,7 @@ The following 64-byte header is present in decompressed Linux kernel image. u32 res1 = 0; /* Reserved */ u64 res2 = 0; /* Reserved */ u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */ - u32 res3; /* Reserved for additional RISC-V specific header */ + u32 magic2 = 0x56534905; /* Magic number 2, little endian, "RSC\x05" */ u32 res4; /* Reserved for PE COFF offset */ This header format is compliant with PE/COFF header and largely inspired from @@ -37,13 +37,14 @@ Notes: Bits 16:31 - Major version This preserves compatibility across newer and older version of the header. - The current version is defined as 0.1. + The current version is defined as 0.2. -- res3 is reserved for offset to any other additional fields. This makes the - header extendible in future. One example would be to accommodate ISA - extension for RISC-V in future. For current version, it is set to be zero. +- The "magic" field is deprecated as of version 0.2. In a future + release, it may be removed. This originally should have matched up + with the ARM64 header "magic" field, but unfortunately does not. + The "magic2" field replaces it, matching up with the ARM64 header. -- In current header, the flag field has only one field. +- In current header, the flags field has only one field. Bit 0: Kernel endianness. 1 if BE, 0 if LE. - Image size is mandatory for boot loader to load kernel image. 
Booting will diff --git a/MAINTAINERS b/MAINTAINERS index 84bb34727f81..16fc09defd39 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17732,8 +17732,7 @@ F: include/uapi/linux/dqblk_xfs.h F: include/uapi/linux/fsmap.h XILINX AXI ETHERNET DRIVER -M: Anirudha Sarangi -M: John Linn +M: Radhey Shyam Pandey S: Maintained F: drivers/net/ethernet/xilinx/xilinx_axienet* diff --git a/Makefile b/Makefile index 0cbe8717bdb3..9b08f6383a52 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Bobtail Squid # *DOCUMENTATION* diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi index 3311a982fff8..23fd0224ca90 100644 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi @@ -279,6 +279,7 @@ &sdhi3 { mmc-hs200-1_8v; non-removable; fixed-emmc-driver-type = <1>; + status = "okay"; }; &usb_extal_clk { diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts index 0711170b26b1..3aa2564dfdc2 100644 --- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts +++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts @@ -97,7 +97,7 @@ memory@48000000 { reg = <0x0 0x48000000 0x0 0x18000000>; }; - reg_1p8v: regulator0 { + reg_1p8v: regulator-1p8v { compatible = "regulator-fixed"; regulator-name = "fixed-1.8V"; regulator-min-microvolt = <1800000>; @@ -106,7 +106,7 @@ reg_1p8v: regulator0 { regulator-always-on; }; - reg_3p3v: regulator1 { + reg_3p3v: regulator-3p3v { compatible = "regulator-fixed"; regulator-name = "fixed-3.3V"; regulator-min-microvolt = <3300000>; @@ -115,7 +115,7 @@ reg_3p3v: regulator1 { regulator-always-on; }; - reg_12p0v: regulator1 { + reg_12p0v: regulator-12p0v { compatible = "regulator-fixed"; regulator-name = "D12.0V"; regulator-min-microvolt = <12000000>; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8fc4de0d22b4..7a84c9f1778e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk) } } -static bool tm_active_with_fp(struct task_struct *tsk) -{ - return MSR_TM_ACTIVE(tsk->thread.regs->msr) && - (tsk->thread.ckpt_regs.msr & MSR_FP); -} - -static bool tm_active_with_altivec(struct task_struct *tsk) -{ - return MSR_TM_ACTIVE(tsk->thread.regs->msr) && - (tsk->thread.ckpt_regs.msr & MSR_VEC); -} #else static inline void check_if_tm_restore_required(struct task_struct *tsk) { } -static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; } -static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ bool strict_msr_control; @@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp); static int restore_fp(struct task_struct *tsk) { - if (tsk->thread.load_fp || tm_active_with_fp(tsk)) { + if (tsk->thread.load_fp) { load_fp_state(¤t->thread.fp_state); current->thread.load_fp++; return 1; @@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); static int restore_altivec(struct task_struct *tsk) { - if (cpu_has_feature(CPU_FTR_ALTIVEC) && - (tsk->thread.load_vec || tm_active_with_altivec(tsk))) { + if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) { load_vr_state(&tsk->thread.vr_state); tsk->thread.used_vr = 1; tsk->thread.load_vec++; @@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk) if (!tsk->thread.regs) return; + 
check_if_tm_restore_required(tsk); + usermsr = tsk->thread.regs->msr; if ((usermsr & msr_all_available) == 0) return; msr_check_and_set(msr_all_available); - check_if_tm_restore_required(tsk); WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index d4acf6fa0596..bf60983a58c7 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -630,7 +630,6 @@ static void early_init_this_mmu(void) #ifdef CONFIG_PPC_FSL_BOOK3E if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned int num_cams; - int __maybe_unused cpu = smp_processor_id(); bool map = true; /* use a quarter of the TLBCAM for bolted linear map */ diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h index ef28e106f247..344db5244547 100644 --- a/arch/riscv/include/asm/image.h +++ b/arch/riscv/include/asm/image.h @@ -3,7 +3,8 @@ #ifndef __ASM_IMAGE_H #define __ASM_IMAGE_H -#define RISCV_IMAGE_MAGIC "RISCV" +#define RISCV_IMAGE_MAGIC "RISCV\0\0\0" +#define RISCV_IMAGE_MAGIC2 "RSC\x05" #define RISCV_IMAGE_FLAG_BE_SHIFT 0 #define RISCV_IMAGE_FLAG_BE_MASK 0x1 @@ -23,7 +24,7 @@ #define __HEAD_FLAGS (__HEAD_FLAG(BE)) #define RISCV_HEADER_VERSION_MAJOR 0 -#define RISCV_HEADER_VERSION_MINOR 1 +#define RISCV_HEADER_VERSION_MINOR 2 #define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \ RISCV_HEADER_VERSION_MINOR) @@ -39,9 +40,8 @@ * @version: version * @res1: reserved * @res2: reserved - * @magic: Magic number - * @res3: reserved (will be used for additional RISC-V specific - * header) + * @magic: Magic number (RISC-V specific; deprecated) + * @magic2: Magic number 2 (to match the ARM64 'magic' field pos) * @res4: reserved (will be used for PE COFF offset) * * The intention is for this header format to be shared between multiple @@ -58,7 +58,7 @@ struct riscv_image_header { u32 res1; u64 res2; u64 magic; - u32 res3; + u32 magic2; u32 res4; }; #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 0f1ba17e476f..52eec0c1bf30 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -39,9 +39,9 @@ ENTRY(_start) .word RISCV_HEADER_VERSION .word 0 .dword 0 - .asciz RISCV_IMAGE_MAGIC - .word 0 + .ascii RISCV_IMAGE_MAGIC .balign 4 + .ascii RISCV_IMAGE_MAGIC2 .word 0 .global _start_kernel diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index b5fd6e85657c..d1ccc168c071 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1961,6 +1961,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, case KVM_S390_MCHK: irq->u.mchk.mcic = s390int->parm64; break; + case KVM_S390_INT_PFAULT_INIT: + irq->u.ext.ext_params = s390int->parm; + irq->u.ext.ext_params2 = s390int->parm64; + break; + case KVM_S390_RESTART: + case KVM_S390_INT_CLOCK_COMP: + case KVM_S390_INT_CPU_TIMER: + break; + default: + return -EINVAL; } return 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f329dcb3f44c..39cff07bf2eb 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1018,6 +1018,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) /* mark all the pages in active slots as dirty */ for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { ms = slots->memslots + slotnr; + if (!ms->dirty_bitmap) + return -EINVAL; /* * The second half of the bitmap is only used on x86, * and would be wasted otherwise, so we put it to good @@ -4323,7 +4325,7 @@ long kvm_arch_vcpu_async_ioctl(struct file 
*filp, } case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; - struct kvm_s390_irq s390irq; + struct kvm_s390_irq s390irq = {}; if (copy_from_user(&s390int, argp, sizeof(s390int))) return -EFAULT; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index ccc88926bc00..9f41a6f5a032 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second { long err; + if (!IS_ENABLED(CONFIG_SYSVIPC)) + return -ENOSYS; + /* No need for backward compatibility. We can start fresh... */ if (call <= SEMTIMEDOP) { switch (call) { case SEMOP: - err = sys_semtimedop(first, ptr, - (unsigned int)second, NULL); + err = ksys_semtimedop(first, ptr, + (unsigned int)second, NULL); goto out; case SEMTIMEDOP: - err = sys_semtimedop(first, ptr, (unsigned int)second, + err = ksys_semtimedop(first, ptr, (unsigned int)second, (const struct __kernel_timespec __user *) - (unsigned long) fifth); + (unsigned long) fifth); goto out; case SEMGET: - err = sys_semget(first, (int)second, (int)third); + err = ksys_semget(first, (int)second, (int)third); goto out; case SEMCTL: { - err = sys_semctl(first, second, - (int)third | IPC_64, - (unsigned long) ptr); + err = ksys_old_semctl(first, second, + (int)third | IPC_64, + (unsigned long) ptr); goto out; } default: @@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second if (call <= MSGCTL) { switch (call) { case MSGSND: - err = sys_msgsnd(first, ptr, (size_t)second, + err = ksys_msgsnd(first, ptr, (size_t)second, (int)third); goto out; case MSGRCV: - err = sys_msgrcv(first, ptr, (size_t)second, fifth, + err = ksys_msgrcv(first, ptr, (size_t)second, fifth, (int)third); goto out; case MSGGET: - err = sys_msgget((key_t)first, (int)second); + err = ksys_msgget((key_t)first, (int)second); goto out; case MSGCTL: - err = sys_msgctl(first, (int)second | IPC_64, ptr); + err = ksys_old_msgctl(first, (int)second | IPC_64, ptr); goto out; default: err = -ENOSYS; @@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second goto out; } case SHMDT: - err = sys_shmdt(ptr); + err = ksys_shmdt(ptr); goto out; case SHMGET: - err = sys_shmget(first, (size_t)second, (int)third); + err = ksys_shmget(first, (size_t)second, (int)third); goto out; case SHMCTL: - err = sys_shmctl(first, (int)second | IPC_64, ptr); + err = ksys_old_shmctl(first, (int)second | IPC_64, ptr); goto out; default: err = -ENOSYS; diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index e65d7fe6489f..5208ba49c89a 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset, * Lower 12 bits encode the number of additional * pages to flush (in addition to the 'cur' page). 
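 * Setting all twelve bits (~PAGE_MASK) requests 4095 additional
 * pages, i.e. one full HV_TLB_FLUSH_UNIT starting at 'cur'.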
*/ - if (diff >= HV_TLB_FLUSH_UNIT) + if (diff >= HV_TLB_FLUSH_UNIT) { gva_list[gva_n] |= ~PAGE_MASK; - else if (diff) + cur += HV_TLB_FLUSH_UNIT; + } else if (diff) { gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT; + cur = end; + } - cur += HV_TLB_FLUSH_UNIT; gva_n++; } while (cur < end); diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 9e5f3c722c33..981fe923a59f 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params) BOOT_PARAM_PRESERVE(eddbuf_entries), BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), + BOOT_PARAM_PRESERVE(secure_boot), BOOT_PARAM_PRESERVE(hdr), BOOT_PARAM_PRESERVE(e820_table), BOOT_PARAM_PRESERVE(eddbuf), diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 74e88e5edd9c..bdc16b0aa7c6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -335,6 +335,7 @@ struct kvm_mmu_page { int root_count; /* Currently serving as active root */ unsigned int unsync_children; struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ + unsigned long mmu_valid_gen; DECLARE_BITMAP(unsync_child_bitmap, 512); #ifdef CONFIG_X86_32 @@ -856,6 +857,7 @@ struct kvm_arch { unsigned long n_requested_mmu_pages; unsigned long n_max_mmu_pages; unsigned int indirect_shadow_pages; + unsigned long mmu_valid_gen; struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; /* * Hash table of struct kvm_mmu_page. diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 9c4435307ff8..35c225ede0e4 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -444,8 +444,10 @@ __pu_label: \ ({ \ int __gu_err; \ __inttype(*(ptr)) __gu_val; \ + __typeof__(ptr) __gu_ptr = (ptr); \ + __typeof__(size) __gu_size = (size); \ __uaccess_begin_nospec(); \ - __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ + __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \ __uaccess_end(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __builtin_expect(__gu_err, 0); \ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index dba2828b779a..08fb79f37793 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -834,6 +834,10 @@ bool __init apic_needs_pit(void) if (!boot_cpu_has(X86_FEATURE_APIC)) return true; + /* Virt guests may lack ARAT, but still have DEADLINE */ + if (!boot_cpu_has(X86_FEATURE_ARAT)) + return true; + /* Deadline timer is based on TSC so no further PIT action required */ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) return false; @@ -1179,10 +1183,6 @@ void clear_local_APIC(void) apic_write(APIC_LVT0, v | APIC_LVT_MASKED); v = apic_read(APIC_LVT1); apic_write(APIC_LVT1, v | APIC_LVT_MASKED); - if (!x2apic_enabled()) { - v = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - apic_write(APIC_LDR, v); - } if (maxlvt >= 4) { v = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 218b277bfda3..a63964e7cec7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2095,6 +2095,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct if (!direct) sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); set_page_private(virt_to_page(sp->spt), (unsigned long)sp); + + /* + * active_mmu_pages must be a FIFO list, as 
kvm_zap_obsolete_pages() + * depends on valid pages being added to the head of the list. See + * comments in kvm_zap_obsolete_pages(). + */ list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); kvm_mod_used_mmu_pages(vcpu->kvm, +1); return sp; @@ -2244,7 +2250,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, #define for_each_valid_sp(_kvm, _sp, _gfn) \ hlist_for_each_entry(_sp, \ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ - if ((_sp)->role.invalid) { \ + if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \ } else #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ @@ -2301,6 +2307,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } static void mmu_audit_disable(void) { } #endif +static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); +} + static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { @@ -2525,6 +2536,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, if (level > PT_PAGE_TABLE_LEVEL && need_sync) flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); } + sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; clear_page(sp->spt); trace_kvm_mmu_get_page(sp, true); @@ -4233,6 +4245,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, return false; if (cached_root_available(vcpu, new_cr3, new_role)) { + /* + * It is possible that the cached previous root page is + * obsolete because of a change in the MMU generation + * number. However, changing the generation number is + * accompanied by KVM_REQ_MMU_RELOAD, which will free + * the root set here and allocate a new one. + */ kvm_make_request(KVM_REQ_LOAD_CR3, vcpu); if (!skip_tlb_flush) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); @@ -5649,11 +5668,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) return alloc_mmu_pages(vcpu); } + +static void kvm_zap_obsolete_pages(struct kvm *kvm) +{ + struct kvm_mmu_page *sp, *node; + LIST_HEAD(invalid_list); + int ign; + +restart: + list_for_each_entry_safe_reverse(sp, node, + &kvm->arch.active_mmu_pages, link) { + /* + * No obsolete valid page exists before a newly created page + * since active_mmu_pages is a FIFO list. + */ + if (!is_obsolete_sp(kvm, sp)) + break; + + /* + * Do not repeatedly zap a root page to avoid unnecessary + * KVM_REQ_MMU_RELOAD, otherwise we may not be able to + * progress: + * vcpu 0 vcpu 1 + * call vcpu_enter_guest(): + * 1): handle KVM_REQ_MMU_RELOAD + * and require mmu-lock to + * load mmu + * repeat: + * 1): zap root page and + * send KVM_REQ_MMU_RELOAD + * + * 2): if (cond_resched_lock(mmu-lock)) + * + * 2): hold mmu-lock and load mmu + * + * 3): see KVM_REQ_MMU_RELOAD bit + * on vcpu->requests is set + * then return 1 to call + * vcpu_enter_guest() again. + * goto repeat; + * + * Since we are reversely walking the list and the invalid + * list will be moved to the head, skip the invalid page + * can help us to avoid the infinity list walking. + */ + if (sp->role.invalid) + continue; + + if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + kvm_mmu_commit_zap_page(kvm, &invalid_list); + cond_resched_lock(&kvm->mmu_lock); + goto restart; + } + + if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) + goto restart; + } + + kvm_mmu_commit_zap_page(kvm, &invalid_list); +} + +/* + * Fast invalidate all shadow pages and use lock-break technique + * to zap obsolete pages. 
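+ * A page is obsolete when its mmu_valid_gen differs from
+ * kvm->arch.mmu_valid_gen; see is_obsolete_sp().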
+ * + * It's required when memslot is being deleted or VM is being + * destroyed, in these cases, we should ensure that KVM MMU does + * not use any resource of the being-deleted slot or all slots + * after calling the function. + */ +static void kvm_mmu_zap_all_fast(struct kvm *kvm) +{ + spin_lock(&kvm->mmu_lock); + kvm->arch.mmu_valid_gen++; + + kvm_zap_obsolete_pages(kvm); + spin_unlock(&kvm->mmu_lock); +} + static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node) { - kvm_mmu_zap_all(kvm); + kvm_mmu_zap_all_fast(kvm); } void kvm_mmu_init_vm(struct kvm *kvm) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index ced9fba32598..a3cba321b5c5 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4540,6 +4540,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) int len; gva_t gva = 0; struct vmcs12 *vmcs12; + struct x86_exception e; short offset; if (!nested_vmx_check_permission(vcpu)) @@ -4588,7 +4589,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu) vmx_instruction_info, true, len, &gva)) return 1; /* _system ok, nested_vmx_check_permission has verified cpl=0 */ - kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL); + if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) + kvm_inject_page_fault(vcpu, &e); } return nested_vmx_succeed(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 290c3c3efb87..91602d310a3f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5312,6 +5312,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, /* kvm_write_guest_virt_system can pull in tons of pages. */ vcpu->arch.l1tf_flush_l1d = true; + /* + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED + * is returned, but our callers are not ready for that and they blindly + * call kvm_inject_page_fault. Ensure that they at least do not leak + * uninitialized kernel stack memory into cr2 and error code. + */ + memset(exception, 0, sizeof(*exception)); return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, PFERR_WRITE_MASK, exception); } diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 8901a1f89cf5..10fb42da0007 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -18,37 +18,40 @@ targets += purgatory.ro KASAN_SANITIZE := n KCOV_INSTRUMENT := n +# These are adjustments to the compiler flags used for objects that +# make up the standalone purgatory.ro + +PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel +PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not # sure how to relocate those. 
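# Kbuild filters everything in CFLAGS_REMOVE_<object>.o out of the final
# compiler command line, so collecting the unwanted flags in
# PURGATORY_CFLAGS_REMOVE keeps the per-object removals in one place.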
ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE) +PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE) endif ifdef CONFIG_STACKPROTECTOR -CFLAGS_REMOVE_sha256.o += -fstack-protector -CFLAGS_REMOVE_purgatory.o += -fstack-protector -CFLAGS_REMOVE_string.o += -fstack-protector -CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector +PURGATORY_CFLAGS_REMOVE += -fstack-protector endif ifdef CONFIG_STACKPROTECTOR_STRONG -CFLAGS_REMOVE_sha256.o += -fstack-protector-strong -CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong -CFLAGS_REMOVE_string.o += -fstack-protector-strong -CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong +PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong endif ifdef CONFIG_RETPOLINE -CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS) -CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS) -CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS) -CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS) +PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS) endif +CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE) +CFLAGS_purgatory.o += $(PURGATORY_CFLAGS) + +CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE) +CFLAGS_sha256.o += $(PURGATORY_CFLAGS) + +CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE) +CFLAGS_string.o += $(PURGATORY_CFLAGS) + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index a0e84538cec8..1fa58c059cbf 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) usb_free_urb(urb); - return 0; + return err; } static int bpa10x_set_diag(struct hci_dev *hdev, bool enable) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ed455de598ea..a9c35ebb30f8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, @@ -1201,10 +1204,6 @@ static int btusb_open(struct hci_dev *hdev) } data->intf->needs_remote_wakeup = 1; - /* device specific wakeup source enabled and required for USB - * remote wakeup while host is suspended - */ - device_wakeup_enable(&data->udev->dev); /* Disable device remote wakeup when host is suspended * For Realtek chips, global suspend without @@ -1281,7 +1280,6 @@ static int btusb_close(struct hci_dev *hdev) if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) data->intf->needs_remote_wakeup = 1; - device_wakeup_disable(&data->udev->dev); usb_autopm_put_interface(data->intf); failed: diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index d33828fef89f..e3164c200eac 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work) ws_awake_device); struct hci_uart *hu = qca->hu; unsigned long retrans_delay; + unsigned long flags; BT_DBG("hu %p wq awake device", hu); /* Vote for serial clock */ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu); - 
spin_lock(&qca->hci_ibs_lock); + spin_lock_irqsave(&qca->hci_ibs_lock, flags); /* Send wake indication to device */ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) @@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work) retrans_delay = msecs_to_jiffies(qca->wake_retrans); mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); - spin_unlock(&qca->hci_ibs_lock); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); @@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work) struct qca_data *qca = container_of(work, struct qca_data, ws_awake_rx); struct hci_uart *hu = qca->hu; + unsigned long flags; BT_DBG("hu %p wq awake rx", hu); serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu); - spin_lock(&qca->hci_ibs_lock); + spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->rx_ibs_state = HCI_IBS_RX_AWAKE; /* Always acknowledge device wake up, @@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work) qca->ibs_sent_wacks++; - spin_unlock(&qca->hci_ibs_lock); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 9c41a4e42575..1072c450c37a 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -192,6 +192,7 @@ struct rcar_dmac_chan { * @iomem: remapped I/O memory base * @n_channels: number of available channels * @channels: array of DMAC channels + * @channels_mask: bitfield of which DMA channels are managed by this driver * @modules: bitmask of client modules in use */ struct rcar_dmac { @@ -202,6 +203,7 @@ struct rcar_dmac { unsigned int n_channels; struct rcar_dmac_chan *channels; + unsigned int channels_mask; DECLARE_BITMAP(modules, 256); }; @@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac) u16 dmaor; /* Clear all channels and enable the DMAC globally. */ - rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0)); + rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); rcar_dmac_write(dmac, RCAR_DMAOR, RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); @@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) for (i = 0; i < dmac->n_channels; ++i) { struct rcar_dmac_chan *chan = &dmac->channels[i]; + if (!(dmac->channels_mask & BIT(i))) + continue; + /* Stop and reinitialize the channel. 
*/ spin_lock_irq(&chan->lock); rcar_dmac_chan_halt(chan); @@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, return 0; } +#define RCAR_DMAC_MAX_CHANNELS 32 + static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) { struct device_node *np = dev->of_node; @@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) return ret; } - if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { + /* The hardware and driver don't support more than 32 bits in CHCLR */ + if (dmac->n_channels <= 0 || + dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { dev_err(dev, "invalid number of channels %u\n", dmac->n_channels); return -EINVAL; } + dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); + return 0; } @@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; - unsigned int channels_offset = 0; struct dma_device *engine; struct rcar_dmac *dmac; struct resource *mem; @@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev) * level we can't disable it selectively, so ignore channel 0 for now if * the device is part of an IOMMU group. */ - if (device_iommu_mapped(&pdev->dev)) { - dmac->n_channels--; - channels_offset = 1; - } + if (device_iommu_mapped(&pdev->dev)) + dmac->channels_mask &= ~BIT(0); dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, sizeof(*dmac->channels), GFP_KERNEL); @@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev) INIT_LIST_HEAD(&engine->channels); for (i = 0; i < dmac->n_channels; ++i) { - ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], - i + channels_offset); + if (!(dmac->channels_mask & BIT(i))) + continue; + + ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); if (ret < 0) goto error; } diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index baac476c8622..525dc7338fe3 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); struct dma_slave_config *slave_cfg = &schan->slave_cfg; dma_addr_t src = 0, dst = 0; + dma_addr_t start_src = 0, start_dst = 0; struct sprd_dma_desc *sdesc; struct scatterlist *sg; u32 len = 0; @@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dst = sg_dma_address(sg); } + if (!i) { + start_src = src; + start_dst = dst; + } + /* * The link-list mode needs at least 2 link-list * configurations. 
If there is only one sg, it doesn't @@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } } - ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, - dir, flags, slave_cfg); + ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src, + start_dst, len, dir, flags, slave_cfg); if (ret) { kfree(sdesc); return NULL; diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c index ad2f0a4cd6a4..f255056696ee 100644 --- a/drivers/dma/ti/dma-crossbar.c +++ b/drivers/dma/ti/dma-crossbar.c @@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, nelm * 2); - if (ret) + if (ret) { + kfree(rsv_events); return ret; + } for (i = 0; i < nelm; i++) { ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index ba27802efcd0..d07c0d5de7a2 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev) rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, IRQF_SHARED, "omap-dma-engine", od); - if (rc) + if (rc) { + omap_dma_free(od); return rc; + } } if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index f1a9c0544e3f..213aedc97dc2 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = { .read = gpio_mockup_debugfs_read, .write = gpio_mockup_debugfs_write, .llseek = no_llseek, + .release = single_release, }; static void gpio_mockup_debugfs_setup(struct device *dev, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 378b206d2dc9..48fea4c68e8d 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) u8 new_irqs; int level, i; u8 invert_irq_mask[MAX_BANK]; - int reg_direction[MAX_BANK]; + u8 reg_direction[MAX_BANK]; - regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, - NBANK(chip)); + pca953x_read_regs(chip, chip->regs->direction, reg_direction); if (chip->driver_data & PCA_PCAL) { /* Enable latch on interrupt-enabled inputs */ @@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) bool pending_seen = false; bool trigger_seen = false; u8 trigger[MAX_BANK]; - int reg_direction[MAX_BANK]; + u8 reg_direction[MAX_BANK]; int ret, i; if (chip->driver_data & PCA_PCAL) { @@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) return false; /* Remove output pins from the equation */ - regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, - NBANK(chip)); + pca953x_read_regs(chip, chip->regs->direction, reg_direction); for (i = 0; i < NBANK(chip); i++) cur_stat[i] &= reg_direction[i]; @@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, { struct i2c_client *client = chip->client; struct irq_chip *irq_chip = &chip->irq_chip; - int reg_direction[MAX_BANK]; + u8 reg_direction[MAX_BANK]; int ret, i; if (!client->irq) @@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, * interrupt. We have to rely on the previous read for * this purpose. 
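 * The direction registers are 8 bits wide per bank, hence the u8
 * reg_direction buffers filled via pca953x_read_regs().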
*/ - regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, - NBANK(chip)); + pca953x_read_regs(chip, chip->regs->direction, reg_direction); for (i = 0; i < NBANK(chip); i++) chip->irq_stat[i] &= reg_direction[i]; mutex_init(&chip->irq_lock); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 39f2f9035c11..bda28eb82c3f 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -7,6 +7,7 @@ * Mika Westerberg */ +#include #include #include #include @@ -19,6 +20,11 @@ #include "gpiolib.h" +static int run_edge_events_on_boot = -1; +module_param(run_edge_events_on_boot, int, 0444); +MODULE_PARM_DESC(run_edge_events_on_boot, + "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -170,10 +176,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, event->irq_requested = true; /* Make sure we trigger the initial state of edge-triggered IRQs */ - value = gpiod_get_raw_value_cansleep(event->desc); - if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || - ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) - event->handler(event->irq, event); + if (run_edge_events_on_boot && + (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) { + value = gpiod_get_raw_value_cansleep(event->desc); + if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) + event->handler(event->irq, event); + } } static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) @@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void) } /* We must use _sync so that this runs after the first deferred_probe run */ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); + +static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), + DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), + } + }, + {} /* Terminating entry */ +}; + +static int acpi_gpio_setup_params(void) +{ + if (run_edge_events_on_boot < 0) { + if (dmi_check_system(run_edge_events_on_boot_blacklist)) + run_edge_events_on_boot = 0; + else + run_edge_events_on_boot = 1; + } + + return 0; +} + +/* Directly after dmi_setup() which runs as core_initcall() */ +postcore_initcall(acpi_gpio_setup_params); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 9762dd6d99fa..9b44c49a9227 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -343,36 +343,27 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, &of_flags); - /* - * -EPROBE_DEFER in our case means that we found a - * valid GPIO property, but no controller has been - * registered so far. - * - * This means we don't need to look any further for - * alternate name conventions, and we should really - * preserve the return code for our user to be able to - * retry probing later. 
- */ - if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER) - return desc; - if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) + if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT) break; } - /* Special handling for SPI GPIOs if used */ - if (IS_ERR(desc)) + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) { + /* Special handling for SPI GPIOs if used */ desc = of_find_spi_gpio(dev, con_id, &of_flags); - if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) { + } + + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) { /* This quirk looks up flags and all */ desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); if (!IS_ERR(desc)) return desc; } - /* Special handling for regulator GPIOs if used */ - if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) { + /* Special handling for regulator GPIOs if used */ desc = of_find_regulator_gpio(dev, con_id, &of_flags); + } if (IS_ERR(desc)) return desc; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index cca749010cd0..d9074191edef 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -535,6 +535,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) return -EINVAL; + /* + * Do not allow both INPUT & OUTPUT flags to be set as they are + * contradictory. + */ + if ((lflags & GPIOHANDLE_REQUEST_INPUT) && + (lflags & GPIOHANDLE_REQUEST_OUTPUT)) + return -EINVAL; + /* * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If * the hardware actually supports enabling both at the same time the @@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } /* This is just wrong: we don't look for events on output lines */ - if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { + if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) || + (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || + (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) { ret = -EINVAL; goto out_free_label; } @@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); ret = gpiod_direction_input(desc); if (ret) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index b0369e690f36..c814bcef18a4 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr, } static int drm_mode_parse_cmdline_extra(const char *str, int length, + bool freestanding, const struct drm_connector *connector, struct drm_cmdline_mode *mode) { @@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length, for (i = 0; i < length; i++) { switch (str[i]) { case 'i': + if (freestanding) + return -EINVAL; + mode->interlace = true; break; case 'm': + if (freestanding) + return -EINVAL; + mode->margins = true; break; case 'D': @@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length, if (extras) { int ret = drm_mode_parse_cmdline_extra(end_ptr + i, 1, + false, connector, mode); if (ret) @@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, return 0; } +static const char * const drm_named_modes_whitelist[] = { + "NTSC", + "PAL", +}; + +static bool 
drm_named_mode_is_in_whitelist(const char *mode, unsigned int size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++) + if (!strncmp(mode, drm_named_modes_whitelist[i], size)) + return true; + + return false; +} + /** * drm_mode_parse_command_line_for_connector - parse command line modeline for connector * @mode_option: optional per connector mode option @@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, * bunch of things: * - We need to make sure that the first character (which * would be our resolution in X) is a digit. - * - However, if the X resolution is missing, then we end up - * with something like x, with our first character - * being an alpha-numerical character, which would be - * considered a named mode. + * - If not, then it's either a named mode or a force on/off. + * To distinguish between the two, we need to run the + * extra parsing function, and if not, then we consider it + * a named mode. * * If this isn't enough, we should add more heuristics here, * and matching unit-tests. */ - if (!isdigit(name[0]) && name[0] != 'x') + if (!isdigit(name[0]) && name[0] != 'x') { + unsigned int namelen = strlen(name); + + /* + * Only the force on/off options can be in that case, + * and they all take a single character. + */ + if (namelen == 1) { + ret = drm_mode_parse_cmdline_extra(name, namelen, true, + connector, mode); + if (!ret) + return true; + } + named_mode = true; + } /* Try to locate the bpp and refresh specifiers, if any */ bpp_ptr = strchr(name, '-'); @@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, if (named_mode) { if (mode_end + 1 > DRM_DISPLAY_MODE_LEN) return false; + + if (!drm_named_mode_is_in_whitelist(name, mode_end)) + return false; + strscpy(mode->name, name, mode_end + 1); } else { ret = drm_mode_parse_cmdline_res_mode(name, mode_end, @@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, extra_ptr != options_ptr) { int len = strlen(name) - (extra_ptr - name); - ret = drm_mode_parse_cmdline_extra(extra_ptr, len, + ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false, connector, mode); if (ret) return false; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 18e4cba76720..8aa6a31e8ad0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, limits.max_lane_count = intel_dp_max_lane_count(intel_dp); limits.min_bpp = intel_dp_min_bpp(pipe_config); - limits.max_bpp = pipe_config->pipe_bpp; + /* + * FIXME: If all the streams can't fit into the link with + * their current pipe_bpp we should reduce pipe_bpp across + * the board until things start to fit. Until then we + * limit to <= 8bpc since that's what was hardcoded for all + * MST streams previously. This hack should be removed once + * we have the proper retry logic in place. 
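+ * (pipe_bpp counts all three color components, so 24 == 8 bpc.)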
+ */ + limits.max_bpp = min(pipe_config->pipe_bpp, 24); intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 2caa594322bc..528b61678334 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -664,15 +664,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) - /* - * As this may not be anonymous memory (e.g. shmem) - * but exist on a real mapping, we have to lock - * the page in order to dirty it -- holding - * the page reference is not sufficient to - * prevent the inode from being truncated. - * Play safe and take the lock. - */ - set_page_dirty_lock(page); + set_page_dirty(page); mark_page_accessed(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 98dfb086320f..99e8242194c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -308,11 +308,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, FLOW_CONTROL_ENABLE | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); - /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ - if (!IS_COFFEELAKE(i915)) - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, - GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); - /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index e9f9e9fb9b17..6381652a8829 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev) return ret; } - if (panel) { + if (panel) bridge = devm_drm_panel_bridge_add(dev, panel, - DRM_MODE_CONNECTOR_Unknown); - } + DRM_MODE_CONNECTOR_DPI); priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc), &priv->dma_hwdesc_phys, diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 477c0f766663..b609dc030d6c 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns) timeout = drm_timeout_abs_to_jiffies(timeout_ns); ret = drm_gem_reservation_object_wait(file, handle, write, timeout); - if (ret == 0) + if (ret == -ETIME) ret = timeout ? 
-ETIMEDOUT : -EBUSY; return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c index 84a2f243ed9b..4695f1c8e33f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c @@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); @@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); @@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); @@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h index b45824ec7c8f..6d61a0eb5d64 100644 --- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h +++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h @@ -9,6 +9,13 @@ #define cmdline_test(test) selftest(test, test) +cmdline_test(drm_cmdline_test_force_d_only) +cmdline_test(drm_cmdline_test_force_D_only_dvi) +cmdline_test(drm_cmdline_test_force_D_only_hdmi) +cmdline_test(drm_cmdline_test_force_D_only_not_digital) +cmdline_test(drm_cmdline_test_force_e_only) +cmdline_test(drm_cmdline_test_margin_only) +cmdline_test(drm_cmdline_test_interlace_only) cmdline_test(drm_cmdline_test_res) cmdline_test(drm_cmdline_test_res_missing_x) cmdline_test(drm_cmdline_test_res_missing_y) diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c index 14c96edb13df..013de9d27c35 100644 --- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c +++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c @@ -17,6 +17,136 @@ static const struct drm_connector no_connector = {}; +static int drm_cmdline_test_force_e_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("e", + &no_connector, + &mode)); + 
FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON); + + return 0; +} + +static int drm_cmdline_test_force_D_only_not_digital(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("D", + &no_connector, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON); + + return 0; +} + +static const struct drm_connector connector_hdmi = { + .connector_type = DRM_MODE_CONNECTOR_HDMIB, +}; + +static int drm_cmdline_test_force_D_only_hdmi(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("D", + &connector_hdmi, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL); + + return 0; +} + +static const struct drm_connector connector_dvi = { + .connector_type = DRM_MODE_CONNECTOR_DVII, +}; + +static int drm_cmdline_test_force_D_only_dvi(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("D", + &connector_dvi, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL); + + return 0; +} + +static int drm_cmdline_test_force_d_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("d", + &no_connector, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_OFF); + + return 0; +} + +static int drm_cmdline_test_margin_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(drm_mode_parse_command_line_for_connector("m", + &no_connector, + &mode)); + + return 0; +} + +static int drm_cmdline_test_interlace_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(drm_mode_parse_command_line_for_connector("i", + &no_connector, + &mode)); + + return 0; +} + static int drm_cmdline_test_res(void *ignored) { struct drm_cmdline_mode mode = { }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 59e9d05ab928..0af048d1a815 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. */ continue; @@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. 
*/ continue; @@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, break; } - if (retries == RETRIES) { - kfree(reply); + if (!reply) return -EINVAL; - } *msg_len = reply_len; *msg = reply; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b607a92791d3..61de81965c44 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) iommu_completion_wait(iommu); } +static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) +{ + struct iommu_cmd cmd; + + build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + dom_id, 1); + iommu_queue_command(iommu, &cmd); + + iommu_completion_wait(iommu); +} + static void amd_iommu_flush_all(struct amd_iommu *iommu) { struct iommu_cmd cmd; @@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain) * another level increases the size of the address space by 9 bits to a size up * to 64 bits. */ -static bool increase_address_space(struct protection_domain *domain, +static void increase_address_space(struct protection_domain *domain, gfp_t gfp) { + unsigned long flags; u64 *pte; - if (domain->mode == PAGE_MODE_6_LEVEL) + spin_lock_irqsave(&domain->lock, flags); + + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) /* address space already 64 bit large */ - return false; + goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) - return false; + goto out; *pte = PM_LEVEL_PDE(domain->mode, iommu_virt_to_phys(domain->pt_root)); @@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain, domain->mode += 1; domain->updated = true; - return true; +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return; } static u64 *alloc_pte(struct protection_domain *domain, @@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, { u64 pte_root = 0; u64 flags = 0; + u32 old_domid; if (domain->mode != PAGE_MODE_NONE) pte_root = iommu_virt_to_phys(domain->pt_root); @@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, flags &= ~DEV_DOMID_MASK; flags |= domain->id; + old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK; amd_iommu_dev_table[devid].data[1] = flags; amd_iommu_dev_table[devid].data[0] = pte_root; + + /* + * A kdump kernel might be replacing a domain ID that was copied from + * the previous kernel--if so, it needs to flush the translation cache + * entries for the old domain ID that is being overwritten + */ + if (old_domid) { + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + amd_iommu_flush_tlb_domid(iommu, old_domid); + } } static void clear_dte_entry(u16 devid) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 12d094d08c0a..c4e0e4a9ee9e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct device *dev); static void __dmar_remove_one_dev_info(struct device_domain_info *info); +static void domain_context_clear(struct intel_iommu *iommu, + struct device *dev); static int domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); static bool device_is_rmrr_locked(struct device *dev); @@ -2105,9 +2107,26 @@ static int domain_context_mapping_one(struct dmar_domain *domain, return ret; } +struct 
domain_context_mapping_data { + struct dmar_domain *domain; + struct intel_iommu *iommu; + struct pasid_table *table; +}; + +static int domain_context_mapping_cb(struct pci_dev *pdev, + u16 alias, void *opaque) +{ + struct domain_context_mapping_data *data = opaque; + + return domain_context_mapping_one(data->domain, data->iommu, + data->table, PCI_BUS_NUM(alias), + alias & 0xff); +} + static int domain_context_mapping(struct dmar_domain *domain, struct device *dev) { + struct domain_context_mapping_data data; struct pasid_table *table; struct intel_iommu *iommu; u8 bus, devfn; @@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) return -ENODEV; table = intel_pasid_get_table(dev); - return domain_context_mapping_one(domain, iommu, table, bus, devfn); + + if (!dev_is_pci(dev)) + return domain_context_mapping_one(domain, iommu, table, + bus, devfn); + + data.domain = domain; + data.iommu = iommu; + data.table = table; + + return pci_for_each_dma_alias(to_pci_dev(dev), + &domain_context_mapping_cb, &data); } static int domain_context_mapped_cb(struct pci_dev *pdev, @@ -4759,6 +4788,28 @@ int __init intel_iommu_init(void) return ret; } +static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct intel_iommu *iommu = opaque; + + domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); + return 0; +} + +/* + * NB - intel-iommu lacks any sort of reference counting for the users of + * dependent devices. If multiple endpoints have intersecting dependent + * devices, unbinding the driver from any one of them will possibly leave + * the others unable to operate. + */ +static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) +{ + if (!iommu || !dev || !dev_is_pci(dev)) + return; + + pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); +} + static void __dmar_remove_one_dev_info(struct device_domain_info *info) { struct dmar_domain *domain; @@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) PASID_RID2PASID); iommu_disable_dev_iotlb(info); - domain_context_clear_one(iommu, info->bus, info->devfn); + domain_context_clear(iommu, info->dev); intel_pasid_free_table(info->dev); } diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 780de0caafe8..9b159132405d 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) } static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev, - unsigned long address, unsigned long pages, int ih, int gl) + unsigned long address, unsigned long pages, int ih) { struct qi_desc desc; - if (pages == -1) { - /* For global kernel pages we have to flush them in *all* PASIDs - * because that's the only option the hardware gives us. Despite - * the fact that they are actually only accessible through one. */ - if (gl) - desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | - QI_EIOTLB_DID(sdev->did) | - QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | - QI_EIOTLB_TYPE; - else - desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | - QI_EIOTLB_DID(sdev->did) | - QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | - QI_EIOTLB_TYPE; + /* + * Do PASID granular IOTLB invalidation if page selective capability is + * not available.
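+ * This path covers both full-address-space flushes (pages == -1)
+ * and hardware without page-selective-within-PASID invalidation;
+ * the non-global PASID-wide flush below is the closest granularity
+ * the hardware offers.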
+ */ + if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { + desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | + QI_EIOTLB_DID(sdev->did) | + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | + QI_EIOTLB_TYPE; desc.qw1 = 0; } else { int mask = ilog2(__roundup_pow_of_two(pages)); @@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE; desc.qw1 = QI_EIOTLB_ADDR(address) | - QI_EIOTLB_GL(gl) | QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask); } @@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d } static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, - unsigned long pages, int ih, int gl) + unsigned long pages, int ih) { struct intel_svm_dev *sdev; rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) - intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); + intel_flush_svm_range_dev(svm, sdev, address, pages, ih); rcu_read_unlock(); } @@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn, struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); intel_flush_svm_range(svm, start, - (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0); + (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); } static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) @@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) { intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid); - intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); } rcu_read_unlock(); @@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) * large and has to be physically contiguous. So it's * hard to be as defensive as we might like. 
*/ intel_pasid_tear_down_entry(iommu, dev, svm->pasid); - intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); kfree_rcu(sdev, rcu); if (list_empty(&svm->devs)) { diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 3c3ad42f22bf..c92b405b7646 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos if (!cdev->ap.applid) return -ENODEV; + if (count < CAPIMSG_BASELEN) + return -EINVAL; + skb = alloc_skb(count, GFP_USER); if (!skb) return -ENOMEM; @@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos } mlen = CAPIMSG_LEN(skb->data); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { - if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { + if (count < CAPI_DATA_B3_REQ_LEN || + (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { kfree_skb(skb); return -EINVAL; } @@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos CAPIMSG_SETAPPID(skb->data, cdev->ap.applid); if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) { + if (count < CAPI_DISCONNECT_B3_RESP_LEN) { + kfree_skb(skb); + return -EINVAL; + } mutex_lock(&cdev->lock); capincci_free(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->lock); diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 74e4364bc9fb..09113b9ad679 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (index == EXT_CSD_SANITIZE_START) cmd.sanitize_busy = true; - err = mmc_wait_for_cmd(host, &cmd, 0); + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) goto out; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 7e0d3a49c06d..bb31e13648d6 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host) struct dma_chan *terminate_chan = NULL; struct mmc_request *mrq; - cancel_delayed_work_sync(&host->timeout_work); + cancel_delayed_work(&host->timeout_work); mrq = host->mrq; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 64d3b5fb7fe5..4a2872f49a60 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -774,8 +774,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, /* All SDHI have SDIO status bits which must be 1 */ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS; - pm_runtime_enable(&pdev->dev); - ret = renesas_sdhi_clk_enable(host); if (ret) goto efree; @@ -856,8 +854,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, efree: tmio_mmc_host_free(host); - pm_runtime_disable(&pdev->dev); - return ret; } EXPORT_SYMBOL_GPL(renesas_sdhi_probe); @@ -869,8 +865,6 @@ int renesas_sdhi_remove(struct platform_device *pdev) tmio_mmc_host_remove(host); renesas_sdhi_clk_disable(host); - pm_runtime_disable(&pdev->dev); - return 0; } EXPORT_SYMBOL_GPL(renesas_sdhi_remove); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 9dc4548271b4..19944b0049db 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) mmc_hostname(host->mmc)); host->flags &= ~SDHCI_SIGNALING_330; host->flags |= 
SDHCI_SIGNALING_180; - host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; host->mmc->caps2 |= MMC_CAP2_NO_SD; host->mmc->caps2 |= MMC_CAP2_NO_SDIO; pci_write_config_dword(chip->pdev, @@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = { const struct sdhci_pci_fixes sdhci_o2 = { .probe = sdhci_pci_o2_probe, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, .probe_slot = sdhci_pci_o2_probe_slot, #ifdef CONFIG_PM_SLEEP .resume = sdhci_pci_o2_resume, diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 8539e10784b4..93e83ad25976 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -172,8 +172,6 @@ static int tmio_mmc_probe(struct platform_device *pdev) host->mmc->f_max = pdata->hclk; host->mmc->f_min = pdata->hclk / 512; - pm_runtime_enable(&pdev->dev); - ret = tmio_mmc_host_probe(host); if (ret) goto host_free; @@ -193,7 +191,6 @@ static int tmio_mmc_probe(struct platform_device *pdev) tmio_mmc_host_remove(host); host_free: tmio_mmc_host_free(host); - pm_runtime_disable(&pdev->dev); cell_disable: if (cell->disable) cell->disable(pdev); @@ -210,8 +207,6 @@ static int tmio_mmc_remove(struct platform_device *pdev) if (cell->disable) cell->disable(pdev); - pm_runtime_disable(&pdev->dev); - return 0; } diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index c5ba13fae399..2f0b092d6dcc 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -163,6 +163,7 @@ struct tmio_mmc_host { unsigned long last_req_ts; struct mutex ios_lock; /* protect set_ios() context */ bool native_hotplug; + bool runtime_synced; bool sdio_irq_enabled; /* Mandatory callback */ diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 2cb3f951c3e2..9b6e1001e77c 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1153,15 +1153,6 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host) } EXPORT_SYMBOL_GPL(tmio_mmc_host_free); -/** - * tmio_mmc_host_probe() - Common probe for all implementations - * @_host: Host to probe - * - * Perform tasks common to all implementations probe functions. - * - * The caller should have called pm_runtime_enable() prior to calling - * the common probe function. 
- */ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) { struct platform_device *pdev = _host->pdev; @@ -1257,19 +1248,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) /* See if we also get DMA */ tmio_mmc_request_dma(_host, pdata); - pm_runtime_set_active(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 50); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); ret = mmc_add_host(mmc); if (ret) goto remove_host; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + pm_runtime_put(&pdev->dev); return 0; remove_host: + pm_runtime_put_noidle(&pdev->dev); tmio_mmc_host_remove(_host); return ret; } @@ -1280,12 +1274,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) struct platform_device *pdev = host->pdev; struct mmc_host *mmc = host->mmc; + pm_runtime_get_sync(&pdev->dev); + if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); - if (!host->native_hotplug) - pm_runtime_get_sync(&pdev->dev); - dev_pm_qos_hide_latency_limit(&pdev->dev); mmc_remove_host(mmc); @@ -1294,7 +1287,10 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) tmio_mmc_release_dma(host); pm_runtime_dont_use_autosuspend(&pdev->dev); + if (host->native_hotplug) + pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); } EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); @@ -1337,6 +1333,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev) { struct tmio_mmc_host *host = dev_get_drvdata(dev); + if (!host->runtime_synced) { + host->runtime_synced = true; + return 0; + } + tmio_mmc_clk_enable(host); tmio_mmc_hw_reset(host->mmc); diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 49aad9a79c18..91a2be41edf6 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -631,7 +631,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) host->clk_disable = uniphier_sd_clk_disable; host->set_clock = uniphier_sd_set_clock; - pm_runtime_enable(&pdev->dev); ret = uniphier_sd_clk_enable(host); if (ret) goto free_host; @@ -653,7 +652,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) free_host: tmio_mmc_host_free(host); - pm_runtime_disable(&pdev->dev); return ret; } @@ -664,7 +662,6 @@ static int uniphier_sd_remove(struct platform_device *pdev) tmio_mmc_host_remove(host); uniphier_sd_clk_disable(host); - pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 58c6231aaa00..87dece0e745d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { .reset_level = HNAE3_GLOBAL_RESET }, { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", + { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", .reset_level = HNAE3_GLOBAL_RESET }, { .int_msk = BIT(3), .msg = "tx_buf_overflow", .reset_level = HNAE3_GLOBAL_RESET }, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4f83f97ffe8b..2e5172f61564 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); while (rwi) { if (adapter->state 
== VNIC_REMOVING || - adapter->state == VNIC_REMOVED) - goto out; + adapter->state == VNIC_REMOVED) { + kfree(rwi); + rc = EBUSY; + break; + } if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; @@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work) netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } -out: + adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index dc034f4e8cf6..1ce2397306b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "ixgbe.h" #include "ixgbe_common.h" @@ -2623,7 +2624,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, /* 16K ints/sec to 9.2K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; - } else if (avg_wire_size <= 1980) { + } else if (avg_wire_size < 1968) { /* 9.2K ints/sec to 8K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; @@ -2656,6 +2657,8 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, case IXGBE_LINK_SPEED_2_5GB_FULL: case IXGBE_LINK_SPEED_1GB_FULL: case IXGBE_LINK_SPEED_10_FULL: + if (avg_wire_size > 8064) + avg_wire_size = 8064; itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC; @@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_IPSEC - if (secpath_exists(skb) && + if (xfrm_offload(skb) && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index ad802a8909e0..a37dcd140f63 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) { + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; unsigned int total_packets = 0, total_bytes = 0; - u32 i = tx_ring->next_to_clean, xsk_frames = 0; - unsigned int budget = q_vector->tx.work_limit; struct xdp_umem *umem = tx_ring->xsk_umem; union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_bi; - bool xmit_done; + u32 xsk_frames = 0; - tx_bi = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC(tx_ring, i); - i -= tx_ring->count; + tx_bi = &tx_ring->tx_buffer_info[ntc]; + tx_desc = IXGBE_TX_DESC(tx_ring, ntc); - do { + while (ntc != ntu) { if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; @@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, tx_bi++; tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; + ntc++; + if (unlikely(ntc == tx_ring->count)) { + ntc = 0; tx_bi = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); } /* issue prefetch for next Tx descriptor */ prefetch(tx_desc); + } - /* update budget accounting */ - budget--; - } while (likely(budget)); - - i += tx_ring->count; - tx_ring->next_to_clean = i; + tx_ring->next_to_clean = ntc; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; @@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); } - xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); - 
- return budget > 0 && xmit_done; + return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 75e93ce2ed99..076f2da36f27 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "ixgbevf.h" @@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->protocol = vlan_get_protocol(skb); #ifdef CONFIG_IXGBEVF_IPSEC - if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index ef3f3d06ff1e..fce9b3a24347 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) for (i = 1; i <= dev->caps.num_ports; i++) { if (mlx4_dev_port(dev, i, &port_cap)) { mlx4_err(dev, - "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); + "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); } else if ((dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_DEFAULT) && (port_cap.dmfs_optimized_state == diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index d0a01e8f000a..b339125b2f09 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); if (!laddr) { - printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); - dev_kfree_skb(skb); - return NETDEV_TX_BUSY; + pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index d5bbe3d6048b..05981b54eaab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) type = cmsg_hdr->type; switch (type) { - case NFP_FLOWER_CMSG_TYPE_PORT_REIFY: - nfp_flower_cmsg_portreify_rx(app, skb); - break; case NFP_FLOWER_CMSG_TYPE_PORT_MOD: nfp_flower_cmsg_portmod_rx(app, skb); break; @@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) struct nfp_flower_priv *priv = app->priv; struct sk_buff_head *skb_head; - if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || - type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) + if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) skb_head = &priv->cmsg_skbs_high; else skb_head = &priv->cmsg_skbs_low; @@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { /* Acks from the NFP that the route is added - ignore. 
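* (the REIFY branch added below gets the same direct treatment, so
* its acks never wait behind the control-message workqueue)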
*/ dev_consume_skb_any(skb); + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) { + /* Handle REIFY acks outside wq to prevent RTNL conflict. */ + nfp_flower_cmsg_portreify_rx(app, skb); + dev_consume_skb_any(skb); } else { nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index ecca794c55e2..05d2b478c99b 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -713,6 +713,21 @@ struct nv_skb_map { struct nv_skb_map *next_tx_ctx; }; +struct nv_txrx_stats { + u64 stat_rx_packets; + u64 stat_rx_bytes; /* not always available in HW */ + u64 stat_rx_missed_errors; + u64 stat_rx_dropped; + u64 stat_tx_packets; /* not always available in HW */ + u64 stat_tx_bytes; + u64 stat_tx_dropped; +}; + +#define nv_txrx_stats_inc(member) \ + __this_cpu_inc(np->txrx_stats->member) +#define nv_txrx_stats_add(member, count) \ + __this_cpu_add(np->txrx_stats->member, (count)) + /* * SMP locking: * All hardware access under netdev_priv(dev)->lock, except the performance @@ -797,10 +812,7 @@ struct fe_priv { /* RX software stats */ struct u64_stats_sync swstats_rx_syncp; - u64 stat_rx_packets; - u64 stat_rx_bytes; /* not always available in HW */ - u64 stat_rx_missed_errors; - u64 stat_rx_dropped; + struct nv_txrx_stats __percpu *txrx_stats; /* media detection workaround. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); @@ -826,9 +838,6 @@ struct fe_priv { /* TX software stats */ struct u64_stats_sync swstats_tx_syncp; - u64 stat_tx_packets; /* not always available in HW */ - u64 stat_tx_bytes; - u64 stat_tx_dropped; /* msi/msi-x fields */ u32 msi_flags; @@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev) } } +static void nv_get_stats(int cpu, struct fe_priv *np, + struct rtnl_link_stats64 *storage) +{ + struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu); + unsigned int syncp_start; + u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors; + u64 tx_packets, tx_bytes, tx_dropped; + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); + rx_packets = src->stat_rx_packets; + rx_bytes = src->stat_rx_bytes; + rx_dropped = src->stat_rx_dropped; + rx_missed_errors = src->stat_rx_missed_errors; + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); + + storage->rx_packets += rx_packets; + storage->rx_bytes += rx_bytes; + storage->rx_dropped += rx_dropped; + storage->rx_missed_errors += rx_missed_errors; + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); + tx_packets = src->stat_tx_packets; + tx_bytes = src->stat_tx_bytes; + tx_dropped = src->stat_tx_dropped; + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + + storage->tx_packets += tx_packets; + storage->tx_bytes += tx_bytes; + storage->tx_dropped += tx_dropped; +} + /* * nv_get_stats64: dev->ndo_get_stats64 function * Get latest stats value from the nic. 
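* Software counters are now kept per CPU; nv_get_stats() folds each
* CPU's snapshot into @storage under the u64_stats seqcounts.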
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) __releases(&netdev_priv(dev)->hwstats_lock) { struct fe_priv *np = netdev_priv(dev); - unsigned int syncp_start; + int cpu; /* * Note: because HW stats are not always available and for @@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) */ /* software stats */ - do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); - storage->rx_packets = np->stat_rx_packets; - storage->rx_bytes = np->stat_rx_bytes; - storage->rx_dropped = np->stat_rx_dropped; - storage->rx_missed_errors = np->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); - - do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); - storage->tx_packets = np->stat_tx_packets; - storage->tx_bytes = np->stat_tx_bytes; - storage->tx_dropped = np->stat_tx_dropped; - } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + for_each_online_cpu(cpu) + nv_get_stats(cpu, np, storage); /* If the nic supports hw counters then retrieve latest values */ if (np->driver_data & DEV_HAS_STATISTICS_V123) { @@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev) } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; + nv_txrx_stats_inc(stat_rx_dropped); u64_stats_update_end(&np->swstats_rx_syncp); return 1; } @@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev) } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; + nv_txrx_stats_inc(stat_rx_dropped); u64_stats_update_end(&np->swstats_rx_syncp); return 1; } @@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev) } if (nv_release_txskb(np, &np->tx_skb[i])) { u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); } np->tx_skb[i].dma = 0; @@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit) && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { + unsigned int len; + 
u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } bytes_compl += np->get_tx_ctx->skb->len; @@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit) && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } bytes_compl += np->get_tx_ctx->skb->len; @@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) nv_legacybackoff_reseed(dev); } } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } @@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) } } +static void rx_missing_handler(u32 flags, struct fe_priv *np) +{ + if (flags & NV_RX_MISSEDFRAME) { + u64_stats_update_begin(&np->swstats_rx_syncp); + nv_txrx_stats_inc(stat_rx_missed_errors); + u64_stats_update_end(&np->swstats_rx_syncp); + } +} + static int nv_rx_process(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); @@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit) } /* the rest are hard errors */ else { - if (flags & NV_RX_MISSEDFRAME) { - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_missed_errors++; - u64_stats_update_end(&np->swstats_rx_syncp); - } + rx_missing_handler(flags, np); dev_kfree_skb(skb); goto next_pkt; } @@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; + nv_txrx_stats_inc(stat_rx_packets); + nv_txrx_stats_add(stat_rx_bytes, len); u64_stats_update_end(&np->swstats_rx_syncp); next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) @@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; + nv_txrx_stats_inc(stat_rx_packets); + nv_txrx_stats_add(stat_rx_bytes, len); u64_stats_update_end(&np->swstats_rx_syncp); } else { dev_kfree_skb(skb); @@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) SET_NETDEV_DEV(dev, &pci_dev->dev); u64_stats_init(&np->swstats_rx_syncp); u64_stats_init(&np->swstats_tx_syncp); + np->txrx_stats = alloc_percpu(struct nv_txrx_stats); + if (!np->txrx_stats) { + pr_err("np->txrx_stats, alloc memory error.\n"); + err = -ENOMEM; + goto out_alloc_percpu; + } timer_setup(&np->oom_kick, nv_do_rx_refill, 0); timer_setup(&np->nic_poll, nv_do_nic_poll, 0); @@ -6060,6 +6110,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) out_disable: pci_disable_device(pci_dev); out_free: + 
free_percpu(np->txrx_stats); +out_alloc_percpu: free_netdev(dev); out: return err; @@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev) static void nv_remove(struct pci_dev *pci_dev) { struct net_device *dev = pci_get_drvdata(pci_dev); + struct fe_priv *np = netdev_priv(dev); + + free_percpu(np->txrx_stats); unregister_netdev(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4083019c547a..f97a4096f8fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) int ret; u32 reg, val; - regmap_field_read(gmac->regmap_field, &val); + ret = regmap_field_read(gmac->regmap_field, &val); + if (ret) { + dev_err(priv->device, "Fail to read from regmap field.\n"); + return ret; + } + reg = gmac->variant->default_syscon_value; if (reg != val) dev_warn(priv->device, diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 331c16d30d5d..23281aeeb222 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd) sp->dev->stats.rx_bytes += count; - if ((skb = dev_alloc_skb(count)) == NULL) + if ((skb = dev_alloc_skb(count + 1)) == NULL) goto out_mem; - ptr = skb_put(skb, count); + ptr = skb_put(skb, count + 1); *ptr++ = cmd; /* KISS command */ memcpy(ptr, sp->cooked_buf + 1, count); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a45c5de96ab1..a5a57ca94c1a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat * Local device Link partner * Pause AsymDir Pause AsymDir Result * 1 X 1 X TX+RX - * 0 1 1 1 RX - * 1 1 0 1 TX + * 0 1 1 1 TX + * 1 1 0 1 RX */ static void phylink_resolve_flow(struct phylink *pl, struct phylink_link_state *state) @@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl, new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; else if (pause & MLO_PAUSE_ASYM) new_pause = state->pause & MLO_PAUSE_SYM ? - MLO_PAUSE_RX : MLO_PAUSE_TX; + MLO_PAUSE_TX : MLO_PAUSE_RX; } else { new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db16d7a13e00..aab0be40d443 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev) } static int tun_attach(struct tun_struct *tun, struct file *file, - bool skip_filter, bool napi, bool napi_frags) + bool skip_filter, bool napi, bool napi_frags, + bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; @@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, * initialized tfile; otherwise we risk using half-initialized * object. 
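* When publish_tun is false the caller must publish tfile->tun itself
* once the device is fully registered (see tun_set_iff()).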
*/ - rcu_assign_pointer(tfile->tun, tun); + if (publish_tun) + rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; tun_set_real_num_queues(tun); @@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS); + ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; @@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS); + ifr->ifr_flags & IFF_NAPI_FRAGS, false); if (err < 0) goto err_free_flow; err = register_netdevice(tun->dev); if (err < 0) goto err_detach; + /* free_netdev() won't check refcnt, to avoid race + * with dev_put() we need to publish tun after registration. + */ + rcu_assign_pointer(tfile->tun, tun); } netif_carrier_on(tun->dev); @@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, - tun->flags & IFF_NAPI_FRAGS); + tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8458e88c18e9..32f53de5b1fe 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) goto bad_desc; } skip: - if (rndis && header.usb_cdc_acm_descriptor && + /* Communication class functions with bmCapabilities are not + * RNDIS. But some Wireless class RNDIS functions use + * bmCapabilities for their own purpose. The failsafe is + * therefore applied only to Communication class RNDIS + * functions. The rndis test is redundant, but a cheap + * optimization.
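+ * (is_rndis() is expected to match only Communication class
+ * functions with the RNDIS subclass/protocol pair, which is what
+ * restricts the failsafe to that class.)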
+ */ + if (rndis && is_rndis(&intf->cur_altsetting->desc) && + header.usb_cdc_acm_descriptor && header.usb_cdc_acm_descriptor->bmCapabilities) { dev_dbg(&intf->dev, "ACM capabilities %02x, not really RNDIS?\n", diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4f3de0ac8b0b..ba98e0971b84 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } } - if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { + if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { if (!try_fill_recv(vi, rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index d74349628db2..0e6a51525d91 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN); LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); - lmc_trace(dev, "lmc_runnin_reset_out"); + lmc_trace(dev, "lmc_running_reset_out"); } diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index 6642bcb27761..8efb493ceec2 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, "%d\n", result); result = 0; error_cmd: + kfree(cmd); kfree_skb(ack_skb); error_msg_to_dev: error_alloc: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e5ca1f2685b6..e29c47744ef5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1114,18 +1114,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* same thing for QuZ... 
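* (the QuZ override has to be applied to iwl_trans->cfg itself; a
* local cfg copy would be lost once probe continues)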
*/ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (cfg == &iwl_ax101_cfg_qu_hr) - cfg = &iwl_ax101_cfg_quz_hr; - else if (cfg == &iwl_ax201_cfg_qu_hr) - cfg = &iwl_ax201_cfg_quz_hr; - else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; + else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; + else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; } #endif diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 653d347a9a19..580387f9f12a 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len, } vs_ie = (struct ieee_types_header *)vendor_ie; + if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 > + IEEE_MAX_IE_SIZE) + return -EINVAL; memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length), vs_ie, vs_ie->len + 2); le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2); diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 18f7d9bf30b2..0939a8c8f3ab 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES) + return; memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); rate_len = rate_ie->len; } @@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, params->beacon.tail, params->beacon.tail_len); - if (rate_ie) + if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len) + return; memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len); + } return; } @@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; + if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) + return; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 40c0d536e20d..9d4426f6905f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) dev_dbg(dev->mt76.dev, "mask out 2GHz support\n"); } + if (is_mt7630(dev)) { + dev->mt76.cap.has_5ghz = false; + dev_dbg(dev->mt76.dev, "mask out 5GHz support\n"); + } + if 
(!mt76x02_field_valid(nic_conf1 & 0xff)) nic_conf1 &= 0xff00; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index f84a7df296ea..7705e55aa3d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -51,6 +51,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw) mt76x0e_stop_hw(dev); } +static int +mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt76x02_dev *dev = hw->priv; + + if (is_mt7630(dev)) + return -EOPNOTSUPP; + + return mt76x02_set_key(hw, cmd, vif, sta, key); +} + static void mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) @@ -67,7 +80,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .configure_filter = mt76x02_configure_filter, .bss_info_changed = mt76x02_bss_info_changed, .sta_state = mt76_sta_state, - .set_key = mt76x02_set_key, + .set_key = mt76x0e_set_key, .conf_tx = mt76x02_conf_tx, .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index ecbe78b8027b..f1cdcd61c54a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev, offset = MAC_IVEIV_ENTRY(key->hw_key_idx); - rt2800_register_multiread(rt2x00dev, offset, - &iveiv_entry, sizeof(iveiv_entry)); - if ((crypto->cipher == CIPHER_TKIP) || - (crypto->cipher == CIPHER_TKIP_NO_MIC) || - (crypto->cipher == CIPHER_AES)) - iveiv_entry.iv[3] |= 0x20; - iveiv_entry.iv[3] |= key->keyidx << 6; + if (crypto->cmd == SET_KEY) { + rt2800_register_multiread(rt2x00dev, offset, + &iveiv_entry, sizeof(iveiv_entry)); + if ((crypto->cipher == CIPHER_TKIP) || + (crypto->cipher == CIPHER_TKIP_NO_MIC) || + (crypto->cipher == CIPHER_AES)) + iveiv_entry.iv[3] |= 0x20; + iveiv_entry.iv[3] |= key->keyidx << 6; + } else { + memset(&iveiv_entry, 0, sizeof(iveiv_entry)); + } + rt2800_register_multiwrite(rt2x00dev, offset, &iveiv_entry, sizeof(iveiv_entry)); } @@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, switch (rt2x00dev->default_ant.rx_chain_num) { case 3: /* Turn on tertiary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, - rf->channel > 14); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, - rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1); /* fall-through */ case 2: /* Turn on secondary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, - rf->channel > 14); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, - rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); /* fall-through */ case 1: /* Turn on primary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, - rf->channel > 14); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, - rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1); break; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index f5048d4b8cb6..760eaffeebd6 100644 --- 
a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -645,7 +645,6 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter, kfree(rsi_dev->tx_buffer); fail_eps: - kfree(rsi_dev); return status; } diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index e42850095892..7eda62a9e0df 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context) &echo_response); if (result) { dev_err(&st95context->spicontext.spidev->dev, - "err: echo response receieve error = 0x%x\n", result); + "err: echo response receive error = 0x%x\n", result); return result; } diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 3e7b11cf1aae..cb98b8fe786e 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) resource_size_t start, size; struct nd_region *nd_region; unsigned long npfns, align; + u32 end_trunc; struct nd_pfn_sb *pfn_sb; phys_addr_t offset; const char *sig; @@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) size = resource_size(&nsio->res); npfns = PHYS_PFN(size - SZ_8K); align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT)); + end_trunc = start + size - ALIGN_DOWN(start + size, align); if (nd_pfn->mode == PFN_MODE_PMEM) { /* * The altmap should be padded out to the block size used @@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) return -ENXIO; } - npfns = PHYS_PFN(size - offset); + npfns = PHYS_PFN(size - offset - end_trunc); pfn_sb->mode = cpu_to_le32(nd_pfn->mode); pfn_sb->dataoff = cpu_to_le64(offset); pfn_sb->npfns = cpu_to_le64(npfns); @@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16); pfn_sb->version_major = cpu_to_le16(1); pfn_sb->version_minor = cpu_to_le16(3); + pfn_sb->end_trunc = cpu_to_le32(end_trunc); pfn_sb->align = cpu_to_le32(nd_pfn->align); checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); pfn_sb->checksum = cpu_to_le64(checksum); diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index ba6438ac4d72..ff84d1afd229 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -2552,7 +2552,7 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx, if (IS_ERR(map)) return map; } else - map = ERR_PTR(-ENODEV); + return ERR_PTR(-ENODEV); ctx->maps[ASPEED_IP_LPC] = map; dev_dbg(ctx->dev, "Acquired LPC regmap"); @@ -2562,6 +2562,33 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx, return ERR_PTR(-EINVAL); } +static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx, + const struct aspeed_sig_expr *expr, + bool enabled) +{ + int ret; + int i; + + for (i = 0; i < expr->ndescs; i++) { + const struct aspeed_sig_desc *desc = &expr->descs[i]; + struct regmap *map; + + map = aspeed_g5_acquire_regmap(ctx, desc->ip); + if (IS_ERR(map)) { + dev_err(ctx->dev, + "Failed to acquire regmap for IP block %d\n", + desc->ip); + return PTR_ERR(map); + } + + ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]); + if (ret <= 0) + return ret; + } + + return 1; +} + /** * Configure a pin's signal by applying an expression's descriptor state for * all descriptors in the expression. 
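* The G5-specific ->eval hook above acquires each descriptor's regmap
* on demand before evaluation instead of assuming it was cached earlier.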
@@ -2647,6 +2674,7 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx, } static const struct aspeed_pinmux_ops aspeed_g5_ops = { + .eval = aspeed_g5_sig_expr_eval, .set = aspeed_g5_sig_expr_set, }; diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c index 839c01b7953f..57305ca838a7 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.c +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c @@ -78,11 +78,14 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, * neither the enabled nor disabled state. Thus we must explicitly test for * either condition as required. */ -int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, +int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled) { - int i; int ret; + int i; + + if (ctx->ops->eval) + return ctx->ops->eval(ctx, expr, enabled); for (i = 0; i < expr->ndescs; i++) { const struct aspeed_sig_desc *desc = &expr->descs[i]; diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index 52d299b59ce2..db3457c86f48 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -702,6 +702,8 @@ struct aspeed_pin_function { struct aspeed_pinmux_data; struct aspeed_pinmux_ops { + int (*eval)(struct aspeed_pinmux_data *ctx, + const struct aspeed_sig_expr *expr, bool enabled); int (*set)(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled); }; @@ -722,9 +724,8 @@ struct aspeed_pinmux_data { int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled, struct regmap *map); -int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, - const struct aspeed_sig_expr *expr, - bool enabled); +int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx, + const struct aspeed_sig_expr *expr, bool enabled); static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c index 584284938ac9..d2f804dbc785 100644 --- a/drivers/regulator/act8945a-regulator.c +++ b/drivers/regulator/act8945a-regulator.c @@ -169,16 +169,16 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) reg = ACT8945A_DCDC3_CTRL; break; case ACT8945A_ID_LDO1: - reg = ACT8945A_LDO1_SUS; + reg = ACT8945A_LDO1_CTRL; break; case ACT8945A_ID_LDO2: - reg = ACT8945A_LDO2_SUS; + reg = ACT8945A_LDO2_CTRL; break; case ACT8945A_ID_LDO3: - reg = ACT8945A_LDO3_SUS; + reg = ACT8945A_LDO3_CTRL; break; case ACT8945A_ID_LDO4: - reg = ACT8945A_LDO4_SUS; + reg = ACT8945A_LDO4_CTRL; break; default: return -EINVAL; diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index 04b732991d69..4d859fef55e6 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -205,7 +205,7 @@ static int slg51000_of_parse_cb(struct device_node *np, ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np, "enable-gpios", 0, gflags, "gpio-en-ldo"); - if (ena_gpiod) { + if (!IS_ERR(ena_gpiod)) { config->ena_gpiod = ena_gpiod; devm_gpiod_unhinge(chip->dev, config->ena_gpiod); } @@ -459,7 +459,7 @@ static int slg51000_i2c_probe(struct i2c_client *client, GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE, "slg51000-cs"); - if (cs_gpiod) { + if (!IS_ERR(cs_gpiod)) { dev_info(dev, "Found chip selector property\n"); chip->cs_gpiod = cs_gpiod; } diff --git a/drivers/regulator/twl-regulator.c 
b/drivers/regulator/twl-regulator.c index 6fa15b2d6fb3..866b4dd01da9 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = { 2500, 2750, }; +/* 600mV to 1450mV in 12.5 mV steps */ +static const struct regulator_linear_range VDD1_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500) +}; + +/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */ +static const struct regulator_linear_range VDD2_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500), + REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500) +}; + static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) { struct twlreg_info *info = rdev_get_drvdata(rdev); @@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev) } static const struct regulator_ops twl4030smps_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage = twl4030smps_set_voltage, .get_voltage = twl4030smps_get_voltage, }; @@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \ }, \ } -#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \ +#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \ + n_volt) \ static const struct twlreg_info TWL4030_INFO_##label = { \ .base = offset, \ .id = num, \ @@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \ .owner = THIS_MODULE, \ .enable_time = turnon_delay, \ .of_map_mode = twl4030reg_map_mode, \ + .n_voltages = n_volt, \ + .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \ + .linear_ranges = label ## _ranges, \ }, \ } @@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00); TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08); TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08); TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08); -TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08); -TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08); +TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68); +TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69); /* VUSBCP is managed *only* by the USB subchip */ TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 8d8c495b5b60..d65558619ab0 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5715,7 +5715,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, * 0 = Set nr_hw_queues by the number of CPUs or HW queues. * 1,128 = Manually specify the maximum nr_hw_queue value to be set, * - * Value range is [0,128]. Default value is 8. + * Value range is [0,256]. Default value is 8. 
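* (the maximum was raised from 128 to 256 so nr_hw_queues can scale
* to hosts with more hardware queues)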
*/ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 329f7aa7e169..a81ef0293696 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -46,7 +46,7 @@ /* FCP MQ queue count limiting */ #define LPFC_FCP_MQ_THRESHOLD_MIN 0 -#define LPFC_FCP_MQ_THRESHOLD_MAX 128 +#define LPFC_FCP_MQ_THRESHOLD_MAX 256 #define LPFC_FCP_MQ_THRESHOLD_DEF 8 /* Common buffer size to accomidate SCSI and NVME IO buffers */ diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index d5cf953b4337..7d622ea1274e 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, struct geni_wrapper *wrapper = se->wrapper; u32 val; + if (!wrapper) + return -EINVAL; + *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE); if (dma_mapping_error(wrapper->dev, *iova)) return -EIO; @@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, struct geni_wrapper *wrapper = se->wrapper; u32 val; + if (!wrapper) + return -EINVAL; + *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE); if (dma_mapping_error(wrapper->dev, *iova)) return -EIO; diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 9e90e969af55..7804869c6a31 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -22,6 +22,12 @@ * Using this limit prevents one virtqueue from starving others. */ #define VHOST_TEST_WEIGHT 0x80000 +/* Max number of packets transferred before requeueing the job. + * Using this limit prevents one virtqueue from starving others with + * pkts. + */ +#define VHOST_TEST_PKT_WEIGHT 256 + enum { VHOST_TEST_VQ = 0, VHOST_TEST_VQ_MAX = 1, @@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n) } vhost_add_used_and_signal(&n->dev, vq, head, 0); total_len += len; - if (unlikely(total_len >= VHOST_TEST_WEIGHT)) { - vhost_poll_queue(&vq->poll); + if (unlikely(vhost_exceeds_weight(vq, 0, total_len))) break; - } } mutex_unlock(&vq->mutex); @@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f) dev = &n->dev; vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; - vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); + vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, + VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT); f->private_data = n; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 0536f8526359..36ca2cf419bf 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init); int vhost_poll_start(struct vhost_poll *poll, struct file *file) { __poll_t mask; - int ret = 0; if (poll->wqh) return 0; @@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); if (mask & EPOLLERR) { vhost_poll_stop(poll); - ret = -EINVAL; + return -EINVAL; } - return ret; + return 0; } EXPORT_SYMBOL_GPL(vhost_poll_start); @@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d) __vhost_vq_meta_reset(d->vqs[i]); } -#if VHOST_ARCH_CAN_ACCEL_UACCESS -static void vhost_map_unprefetch(struct vhost_map *map) -{ - kfree(map->pages); - map->pages = NULL; - map->npages = 0; - map->addr = NULL; -} - -static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) -{ - struct vhost_map 
*map[VHOST_NUM_ADDRS]; - int i; - - spin_lock(&vq->mmu_lock); - for (i = 0; i < VHOST_NUM_ADDRS; i++) { - map[i] = rcu_dereference_protected(vq->maps[i], - lockdep_is_held(&vq->mmu_lock)); - if (map[i]) - rcu_assign_pointer(vq->maps[i], NULL); - } - spin_unlock(&vq->mmu_lock); - - synchronize_rcu(); - - for (i = 0; i < VHOST_NUM_ADDRS; i++) - if (map[i]) - vhost_map_unprefetch(map[i]); - -} - -static void vhost_reset_vq_maps(struct vhost_virtqueue *vq) -{ - int i; - - vhost_uninit_vq_maps(vq); - for (i = 0; i < VHOST_NUM_ADDRS; i++) - vq->uaddrs[i].size = 0; -} - -static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, - unsigned long start, - unsigned long end) -{ - if (unlikely(!uaddr->size)) - return false; - - return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); -} - -static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq, - int index, - unsigned long start, - unsigned long end) -{ - struct vhost_uaddr *uaddr = &vq->uaddrs[index]; - struct vhost_map *map; - int i; - - if (!vhost_map_range_overlap(uaddr, start, end)) - return; - - spin_lock(&vq->mmu_lock); - ++vq->invalidate_count; - - map = rcu_dereference_protected(vq->maps[index], - lockdep_is_held(&vq->mmu_lock)); - if (map) { - if (uaddr->write) { - for (i = 0; i < map->npages; i++) - set_page_dirty(map->pages[i]); - } - rcu_assign_pointer(vq->maps[index], NULL); - } - spin_unlock(&vq->mmu_lock); - - if (map) { - synchronize_rcu(); - vhost_map_unprefetch(map); - } -} - -static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq, - int index, - unsigned long start, - unsigned long end) -{ - if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end)) - return; - - spin_lock(&vq->mmu_lock); - --vq->invalidate_count; - spin_unlock(&vq->mmu_lock); -} - -static int vhost_invalidate_range_start(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) -{ - struct vhost_dev *dev = container_of(mn, struct vhost_dev, - mmu_notifier); - int i, j; - - if (!mmu_notifier_range_blockable(range)) - return -EAGAIN; - - for (i = 0; i < dev->nvqs; i++) { - struct vhost_virtqueue *vq = dev->vqs[i]; - - for (j = 0; j < VHOST_NUM_ADDRS; j++) - vhost_invalidate_vq_start(vq, j, - range->start, - range->end); - } - - return 0; -} - -static void vhost_invalidate_range_end(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) -{ - struct vhost_dev *dev = container_of(mn, struct vhost_dev, - mmu_notifier); - int i, j; - - for (i = 0; i < dev->nvqs; i++) { - struct vhost_virtqueue *vq = dev->vqs[i]; - - for (j = 0; j < VHOST_NUM_ADDRS; j++) - vhost_invalidate_vq_end(vq, j, - range->start, - range->end); - } -} - -static const struct mmu_notifier_ops vhost_mmu_notifier_ops = { - .invalidate_range_start = vhost_invalidate_range_start, - .invalidate_range_end = vhost_invalidate_range_end, -}; - -static void vhost_init_maps(struct vhost_dev *dev) -{ - struct vhost_virtqueue *vq; - int i, j; - - dev->mmu_notifier.ops = &vhost_mmu_notifier_ops; - - for (i = 0; i < dev->nvqs; ++i) { - vq = dev->vqs[i]; - for (j = 0; j < VHOST_NUM_ADDRS; j++) - RCU_INIT_POINTER(vq->maps[j], NULL); - } -} -#endif - static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; - vq->invalidate_count = 0; __vhost_vq_meta_reset(vq); -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_reset_vq_maps(vq); -#endif } static int vhost_worker(void *data) @@ -634,9 +475,7 @@ void 
vhost_dev_init(struct vhost_dev *dev, INIT_LIST_HEAD(&dev->read_list); INIT_LIST_HEAD(&dev->pending_list); spin_lock_init(&dev->iotlb_lock); -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_init_maps(dev); -#endif + for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; @@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev, vq->heads = NULL; vq->dev = dev; mutex_init(&vq->mutex); - spin_lock_init(&vq->mmu_lock); vhost_vq_reset(dev, vq); if (vq->handle_kick) vhost_poll_init(&vq->poll, vq->handle_kick, @@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev) if (err) goto err_cgroup; -#if VHOST_ARCH_CAN_ACCEL_UACCESS - err = mmu_notifier_register(&dev->mmu_notifier, dev->mm); - if (err) - goto err_mmu_notifier; -#endif - return 0; - -#if VHOST_ARCH_CAN_ACCEL_UACCESS -err_mmu_notifier: - vhost_dev_free_iovecs(dev); -#endif err_cgroup: kthread_stop(worker); dev->worker = NULL; @@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev) spin_unlock(&dev->iotlb_lock); } -#if VHOST_ARCH_CAN_ACCEL_UACCESS -static void vhost_setup_uaddr(struct vhost_virtqueue *vq, - int index, unsigned long uaddr, - size_t size, bool write) -{ - struct vhost_uaddr *addr = &vq->uaddrs[index]; - - addr->uaddr = uaddr; - addr->size = size; - addr->write = write; -} - -static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq) -{ - vhost_setup_uaddr(vq, VHOST_ADDR_DESC, - (unsigned long)vq->desc, - vhost_get_desc_size(vq, vq->num), - false); - vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL, - (unsigned long)vq->avail, - vhost_get_avail_size(vq, vq->num), - false); - vhost_setup_uaddr(vq, VHOST_ADDR_USED, - (unsigned long)vq->used, - vhost_get_used_size(vq, vq->num), - true); -} - -static int vhost_map_prefetch(struct vhost_virtqueue *vq, - int index) -{ - struct vhost_map *map; - struct vhost_uaddr *uaddr = &vq->uaddrs[index]; - struct page **pages; - int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE); - int npinned; - void *vaddr, *v; - int err; - int i; - - spin_lock(&vq->mmu_lock); - - err = -EFAULT; - if (vq->invalidate_count) - goto err; - - err = -ENOMEM; - map = kmalloc(sizeof(*map), GFP_ATOMIC); - if (!map) - goto err; - - pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC); - if (!pages) - goto err_pages; - - err = EFAULT; - npinned = __get_user_pages_fast(uaddr->uaddr, npages, - uaddr->write, pages); - if (npinned > 0) - release_pages(pages, npinned); - if (npinned != npages) - goto err_gup; - - for (i = 0; i < npinned; i++) - if (PageHighMem(pages[i])) - goto err_gup; - - vaddr = v = page_address(pages[0]); - - /* For simplicity, fallback to userspace address if VA is not - * contigious. - */ - for (i = 1; i < npinned; i++) { - v += PAGE_SIZE; - if (v != page_address(pages[i])) - goto err_gup; - } - - map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1)); - map->npages = npages; - map->pages = pages; - - rcu_assign_pointer(vq->maps[index], map); - /* No need for a synchronize_rcu(). This function should be - * called by dev->worker so we are serialized with all - * readers. 
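The map-prefetch code being deleted here followed the standard RCU publish/retire discipline: publish with rcu_assign_pointer() under a spinlock, read under rcu_read_lock(), and on teardown clear the pointer, call synchronize_rcu(), then free. A minimal generic sketch of that lifecycle (illustrative only, with made-up names; not the vhost code itself):

	struct blob { int data; };

	static struct blob __rcu *gp;
	static DEFINE_SPINLOCK(gp_lock);

	static void publish(struct blob *b)
	{
		spin_lock(&gp_lock);
		rcu_assign_pointer(gp, b);	/* pairs with rcu_dereference() */
		spin_unlock(&gp_lock);
	}

	static int read_one(void)
	{
		struct blob *b;
		int val = -1;

		rcu_read_lock();
		b = rcu_dereference(gp);
		if (b)
			val = b->data;
		rcu_read_unlock();
		return val;
	}

	static void retire(void)
	{
		struct blob *b;

		spin_lock(&gp_lock);
		b = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
		RCU_INIT_POINTER(gp, NULL);
		spin_unlock(&gp_lock);

		synchronize_rcu();		/* wait out current readers */
		kfree(b);
	}

The deleted vhost_uninit_vq_maps() is essentially the retire() half, applied to every VHOST_NUM_ADDRS slot.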
- */ - spin_unlock(&vq->mmu_lock); - - return 0; - -err_gup: - kfree(pages); -err_pages: - kfree(map); -err: - spin_unlock(&vq->mmu_lock); - return err; -} -#endif - void vhost_dev_cleanup(struct vhost_dev *dev) { int i; @@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev) kthread_stop(dev->worker); dev->worker = NULL; } - if (dev->mm) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - mmu_notifier_unregister(&dev->mmu_notifier, dev->mm); -#endif + if (dev->mm) mmput(dev->mm); - } -#if VHOST_ARCH_CAN_ACCEL_UACCESS - for (i = 0; i < dev->nvqs; i++) - vhost_uninit_vq_maps(dev->vqs[i]); -#endif dev->mm = NULL; } EXPORT_SYMBOL_GPL(vhost_dev_cleanup); @@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - *((__virtio16 *)&used->ring[vq->num]) = - cpu_to_vhost16(vq, vq->avail_idx); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)); } @@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq, struct vring_used_elem *head, int idx, int count) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - size_t size; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - size = count * sizeof(*head); - memcpy(used->ring + idx, head, size); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_copy_to_user(vq, vq->used->ring + idx, head, count * sizeof(*head)); } @@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq, static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - used->flags = cpu_to_vhost16(vq, vq->used_flags); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags); } @@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - used->idx = cpu_to_vhost16(vq, vq->last_used_idx); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx); } @@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d) static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, __virtio16 *idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *idx = avail->idx; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return 
vhost_get_avail(vq, *idx, &vq->avail->idx); } static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, __virtio16 *head, int idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *head = avail->ring[idx & (vq->num - 1)]; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_get_avail(vq, *head, &vq->avail->ring[idx & (vq->num - 1)]); } @@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, __virtio16 *flags) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *flags = avail->flags; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_get_avail(vq, *flags, &vq->avail->flags); } static inline int vhost_get_used_event(struct vhost_virtqueue *vq, __virtio16 *event) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *event = (__virtio16)avail->ring[vq->num]; - rcu_read_unlock(); - return 0; - } - rcu_read_unlock(); - } -#endif - return vhost_get_avail(vq, *event, vhost_used_event(vq)); } static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, __virtio16 *idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - *idx = used->idx; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_get_used(vq, *idx, &vq->used->idx); } static inline int vhost_get_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, int idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_desc *d; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]); - if (likely(map)) { - d = map->addr; - *desc = *(d + idx); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); } @@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq, return true; } -#if VHOST_ARCH_CAN_ACCEL_UACCESS -static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq) -{ - struct vhost_map __rcu *map; - int i; - - for (i = 0; i < VHOST_NUM_ADDRS; i++) { - rcu_read_lock(); - map = rcu_dereference(vq->maps[i]); - rcu_read_unlock(); - if (unlikely(!map)) - vhost_map_prefetch(vq, i); - } -} -#endif - int vq_meta_prefetch(struct vhost_virtqueue *vq) { unsigned int num = vq->num; - if (!vq->iotlb) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_vq_map_prefetch(vq); -#endif + if (!vq->iotlb) return 1; - } return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && @@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d, mutex_lock(&vq->mutex); -#if VHOST_ARCH_CAN_ACCEL_UACCESS - /* Unregister MMU notifer to allow invalidation callback - * can access vq->uaddrs[] without holding a lock. 
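Every accessor stripped here had the same two-tier shape: attempt the prefetched kernel mapping under RCU, else fall back to the uaccess/IOTLB path. Condensed into one hypothetical example (a simplification of the deleted bodies, not code from the tree):

	static int read_avail_idx_2tier(struct vhost_virtqueue *vq,
					__virtio16 *idx)
	{
		struct vhost_map *map;

		if (!vq->iotlb) {
			rcu_read_lock();
			map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
			if (likely(map)) {	/* fast path: direct load */
				*idx = ((struct vring_avail *)map->addr)->idx;
				rcu_read_unlock();
				return 0;
			}
			rcu_read_unlock();
		}
		/* slow path: copy through userspace/IOTLB translation */
		return vhost_get_avail(vq, *idx, &vq->avail->idx);
	}

After the revert, only the final vhost_get_avail() line survives in each accessor.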
- */ - if (d->mm) - mmu_notifier_unregister(&d->mmu_notifier, d->mm); - - vhost_uninit_vq_maps(vq); -#endif - switch (ioctl) { case VHOST_SET_VRING_NUM: r = vhost_vring_set_num(d, vq, argp); @@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d, BUG(); } -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_setup_vq_uaddr(vq); - - if (d->mm) - mmu_notifier_register(&d->mmu_notifier, d->mm); -#endif - mutex_unlock(&vq->mutex); return r; @@ -2688,7 +2178,7 @@ static int get_indirect(struct vhost_virtqueue *vq, /* If this is an input descriptor, increment that count. */ if (access == VHOST_ACCESS_WO) { *in_num += ret; - if (unlikely(log)) { + if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; @@ -2829,7 +2319,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* If this is an input descriptor, * increment that count. */ *in_num += ret; - if (unlikely(log)) { + if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 42a8c2a13ab1..e9ed2722b633 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -12,9 +12,6 @@ #include #include #include -#include -#include -#include struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *work); @@ -83,24 +80,6 @@ enum vhost_uaddr_type { VHOST_NUM_ADDRS = 3, }; -struct vhost_map { - int npages; - void *addr; - struct page **pages; -}; - -struct vhost_uaddr { - unsigned long uaddr; - size_t size; - bool write; -}; - -#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 -#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 -#else -#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 -#endif - /* The virtqueue structure describes a queue attached to a device. */ struct vhost_virtqueue { struct vhost_dev *dev; @@ -111,22 +90,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; - -#if VHOST_ARCH_CAN_ACCEL_UACCESS - /* Read by memory accessors, modified by meta data - * prefetching, MMU notifier and vring ioctl(). - * Synchonrized through mmu_lock (writers) and RCU (writers - * and readers). - */ - struct vhost_map __rcu *maps[VHOST_NUM_ADDRS]; - /* Read by MMU notifier, modified by vring ioctl(), - * synchronized through MMU notifier - * registering/unregistering. 
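The two `log && ret` changes above share one intent: a write descriptor that mapped nothing (reading the surrounding code, ret here appears to be the iovec count from translation, so ret == 0 means zero length) must not consume a dirty-log slot. The guarded pattern, as it now reads in both call sites:

	if (access == VHOST_ACCESS_WO) {
		*in_num += ret;
		/* only log descriptors that actually mapped something */
		if (unlikely(log && ret)) {
			log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
			log[*log_num].len = vhost32_to_cpu(vq, desc.len);
			++*log_num;
		}
	}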
- */ - struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS]; -#endif const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; - struct file *kick; struct eventfd_ctx *call_ctx; struct eventfd_ctx *error_ctx; @@ -181,8 +145,6 @@ struct vhost_virtqueue { bool user_be; #endif u32 busyloop_timeout; - spinlock_t mmu_lock; - int invalidate_count; }; struct vhost_msg_node { @@ -196,9 +158,6 @@ struct vhost_msg_node { struct vhost_dev { struct mm_struct *mm; -#ifdef CONFIG_MMU_NOTIFIER - struct mmu_notifier mmu_notifier; -#endif struct mutex mutex; struct vhost_virtqueue **vqs; int nvqs; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index c8be1c4f5b55..bdc08244a648 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -566,13 +566,17 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, unmap_release: err_idx = i; - i = head; + + if (indirect) + i = 0; + else + i = head; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; vring_unmap_one_split(vq, &desc[i]); - i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next); + i = virtio16_to_cpu(_vq->vdev, desc[i].next); } if (indirect) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1ff438fd5bc2..eeb75281894e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3628,6 +3628,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb) TASK_UNINTERRUPTIBLE); } +static void end_extent_buffer_writeback(struct extent_buffer *eb) +{ + clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); + smp_mb__after_atomic(); + wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); +} + /* * Lock eb pages and flush the bio if we can't the locks * @@ -3699,8 +3706,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb if (!trylock_page(p)) { if (!flush) { - ret = flush_write_bio(epd); - if (ret < 0) { + int err; + + err = flush_write_bio(epd); + if (err < 0) { + ret = err; failed_page_nr = i; goto err_unlock; } @@ -3715,16 +3725,23 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb /* Unlock already locked pages */ for (i = 0; i < failed_page_nr; i++) unlock_page(eb->pages[i]); + /* + * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it. + * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can + * be made and undo everything done before. 
+ */ + btrfs_tree_lock(eb); + spin_lock(&eb->refs_lock); + set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); + end_extent_buffer_writeback(eb); + spin_unlock(&eb->refs_lock); + percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len, + fs_info->dirty_metadata_batch); + btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); + btrfs_tree_unlock(eb); return ret; } -static void end_extent_buffer_writeback(struct extent_buffer *eb) -{ - clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); - smp_mb__after_atomic(); - wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); -} - static void set_btree_ioerr(struct page *page) { struct extent_buffer *eb = (struct extent_buffer *)page->private; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6c8297bcfeb7..1bfd7e34f31e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, BTRFS_I(inode), LOG_OTHER_INODE_ALL, 0, LLONG_MAX, ctx); - iput(inode); + btrfs_add_delayed_iput(inode); } } continue; @@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, ret = btrfs_log_inode(trans, root, BTRFS_I(inode), LOG_OTHER_INODE, 0, LLONG_MAX, ctx); if (ret) { - iput(inode); + btrfs_add_delayed_iput(inode); continue; } @@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { - iput(inode); + btrfs_add_delayed_iput(inode); continue; } @@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, } path->slots[0]++; } - iput(inode); + btrfs_add_delayed_iput(inode); } return ret; @@ -5689,7 +5689,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, } if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { - iput(di_inode); + btrfs_add_delayed_iput(di_inode); break; } @@ -5701,7 +5701,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, if (!ret && btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) ret = 1; - iput(di_inode); + btrfs_add_delayed_iput(di_inode); if (ret) goto next_dir_inode; if (ctx->log_new_dentries) { @@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, if (!ret && ctx && ctx->log_new_dentries) ret = log_new_dir_dentries(trans, root, BTRFS_I(dir_inode), ctx); - iput(dir_inode); + btrfs_add_delayed_iput(dir_inode); if (ret) goto out; } @@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, ret = btrfs_log_inode(trans, root, BTRFS_I(inode), LOG_INODE_EXISTS, 0, LLONG_MAX, ctx); - iput(inode); + btrfs_add_delayed_iput(inode); if (ret) return ret; diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index f752d83a9c44..520f1813e789 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -20,6 +20,15 @@ #include #include +struct configfs_fragment { + atomic_t frag_count; + struct rw_semaphore frag_sem; + bool frag_dead; +}; + +void put_fragment(struct configfs_fragment *); +struct configfs_fragment *get_fragment(struct configfs_fragment *); + struct configfs_dirent { atomic_t s_count; int s_dependent_count; @@ -34,6 +43,7 @@ struct configfs_dirent { #ifdef CONFIG_LOCKDEP int s_depth; #endif + struct configfs_fragment *s_frag; }; #define CONFIGFS_ROOT 0x0001 @@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in extern int configfs_create_file(struct config_item *, const struct 
configfs_attribute *); extern int configfs_create_bin_file(struct config_item *, const struct configfs_bin_attribute *); -extern int configfs_make_dirent(struct configfs_dirent *, - struct dentry *, void *, umode_t, int); +extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *, + void *, umode_t, int, struct configfs_fragment *); extern int configfs_dirent_is_ready(struct configfs_dirent *); extern void configfs_hash_and_remove(struct dentry * dir, const char * name); @@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd) { if (!(sd->s_type & CONFIGFS_ROOT)) { kfree(sd->s_iattr); + put_fragment(sd->s_frag); kmem_cache_free(configfs_dir_cachep, sd); } } diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 92112915de8e..79fc25aaa8cd 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd) #endif /* CONFIG_LOCKDEP */ +static struct configfs_fragment *new_fragment(void) +{ + struct configfs_fragment *p; + + p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL); + if (p) { + atomic_set(&p->frag_count, 1); + init_rwsem(&p->frag_sem); + p->frag_dead = false; + } + return p; +} + +void put_fragment(struct configfs_fragment *frag) +{ + if (frag && atomic_dec_and_test(&frag->frag_count)) + kfree(frag); +} + +struct configfs_fragment *get_fragment(struct configfs_fragment *frag) +{ + if (likely(frag)) + atomic_inc(&frag->frag_count); + return frag; +} + /* * Allocates a new configfs_dirent and links it to the parent configfs_dirent */ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd, - void *element, int type) + void *element, int type, + struct configfs_fragment *frag) { struct configfs_dirent * sd; @@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren kmem_cache_free(configfs_dir_cachep, sd); return ERR_PTR(-ENOENT); } + sd->s_frag = get_fragment(frag); list_add(&sd->s_sibling, &parent_sd->s_children); spin_unlock(&configfs_dirent_lock); @@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd, int configfs_make_dirent(struct configfs_dirent * parent_sd, struct dentry * dentry, void * element, - umode_t mode, int type) + umode_t mode, int type, struct configfs_fragment *frag) { struct configfs_dirent * sd; - sd = configfs_new_dirent(parent_sd, element, type); + sd = configfs_new_dirent(parent_sd, element, type, frag); if (IS_ERR(sd)) return PTR_ERR(sd); @@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode) * until it is validated by configfs_dir_set_ready() */ -static int configfs_create_dir(struct config_item *item, struct dentry *dentry) +static int configfs_create_dir(struct config_item *item, struct dentry *dentry, + struct configfs_fragment *frag) { int error; umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; @@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry) return error; error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, - CONFIGFS_DIR | CONFIGFS_USET_CREATING); + CONFIGFS_DIR | CONFIGFS_USET_CREATING, + frag); if (unlikely(error)) return error; @@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl, { int err = 0; umode_t mode = S_IFLNK | S_IRWXUGO; + struct configfs_dirent *p = parent->d_fsdata; - err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode, - CONFIGFS_ITEM_LINK); + err = configfs_make_dirent(p, dentry, sl, 
mode, + CONFIGFS_ITEM_LINK, p->s_frag); if (!err) { err = configfs_create(dentry, mode, init_symlink); if (err) { @@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item) static int configfs_attach_group(struct config_item *parent_item, struct config_item *item, - struct dentry *dentry); + struct dentry *dentry, + struct configfs_fragment *frag); static void configfs_detach_group(struct config_item *item); static void detach_groups(struct config_group *group) @@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group) * try using vfs_mkdir. Just a thought. */ static int create_default_group(struct config_group *parent_group, - struct config_group *group) + struct config_group *group, + struct configfs_fragment *frag) { int ret; struct configfs_dirent *sd; @@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group, d_add(child, NULL); ret = configfs_attach_group(&parent_group->cg_item, - &group->cg_item, child); + &group->cg_item, child, frag); if (!ret) { sd = child->d_fsdata; sd->s_type |= CONFIGFS_USET_DEFAULT; @@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group, return ret; } -static int populate_groups(struct config_group *group) +static int populate_groups(struct config_group *group, + struct configfs_fragment *frag) { struct config_group *new_group; int ret = 0; list_for_each_entry(new_group, &group->default_groups, group_entry) { - ret = create_default_group(group, new_group); + ret = create_default_group(group, new_group, frag); if (ret) { detach_groups(group); break; @@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g */ static int configfs_attach_item(struct config_item *parent_item, struct config_item *item, - struct dentry *dentry) + struct dentry *dentry, + struct configfs_fragment *frag) { int ret; - ret = configfs_create_dir(item, dentry); + ret = configfs_create_dir(item, dentry, frag); if (!ret) { ret = populate_attrs(item); if (ret) { @@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item) static int configfs_attach_group(struct config_item *parent_item, struct config_item *item, - struct dentry *dentry) + struct dentry *dentry, + struct configfs_fragment *frag) { int ret; struct configfs_dirent *sd; - ret = configfs_attach_item(parent_item, item, dentry); + ret = configfs_attach_item(parent_item, item, dentry, frag); if (!ret) { sd = dentry->d_fsdata; sd->s_type |= CONFIGFS_USET_DIR; @@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item, */ inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); configfs_adjust_dir_dirent_depth_before_populate(sd); - ret = populate_groups(to_config_group(item)); + ret = populate_groups(to_config_group(item), frag); if (ret) { configfs_detach_item(item); d_inode(dentry)->i_flags |= S_DEAD; @@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode struct configfs_dirent *sd; const struct config_item_type *type; struct module *subsys_owner = NULL, *new_item_owner = NULL; + struct configfs_fragment *frag; char *name; sd = dentry->d_parent->d_fsdata; @@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode goto out; } + frag = new_fragment(); + if (!frag) { + ret = -ENOMEM; + goto out; + } + /* Get a working ref for the duration of this function */ parent_item = configfs_get_config_item(dentry->d_parent); type = parent_item->ci_type; @@ -1367,9 +1410,9 @@ static 
int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode spin_unlock(&configfs_dirent_lock); if (group) - ret = configfs_attach_group(parent_item, item, dentry); + ret = configfs_attach_group(parent_item, item, dentry, frag); else - ret = configfs_attach_item(parent_item, item, dentry); + ret = configfs_attach_item(parent_item, item, dentry, frag); spin_lock(&configfs_dirent_lock); sd->s_type &= ~CONFIGFS_USET_IN_MKDIR; @@ -1406,6 +1449,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode * reference. */ config_item_put(parent_item); + put_fragment(frag); out: return ret; @@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) struct config_item *item; struct configfs_subsystem *subsys; struct configfs_dirent *sd; + struct configfs_fragment *frag; struct module *subsys_owner = NULL, *dead_item_owner = NULL; int ret; @@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) } } while (ret == -EAGAIN); + frag = sd->s_frag; + if (down_write_killable(&frag->frag_sem)) { + spin_lock(&configfs_dirent_lock); + configfs_detach_rollback(dentry); + spin_unlock(&configfs_dirent_lock); + return -EINTR; + } + frag->frag_dead = true; + up_write(&frag->frag_sem); + /* Get a working ref for the duration of this function */ item = configfs_get_config_item(dentry); @@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file) */ err = -ENOENT; if (configfs_dirent_is_ready(parent_sd)) { - file->private_data = configfs_new_dirent(parent_sd, NULL, 0); + file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL); if (IS_ERR(file->private_data)) err = PTR_ERR(file->private_data); else @@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group, { struct configfs_subsystem *subsys = parent_group->cg_subsys; struct dentry *parent; + struct configfs_fragment *frag; int ret; + frag = new_fragment(); + if (!frag) + return -ENOMEM; + mutex_lock(&subsys->su_mutex); link_group(parent_group, group); mutex_unlock(&subsys->su_mutex); @@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group, parent = parent_group->cg_item.ci_dentry; inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); - ret = create_default_group(parent_group, group); + ret = create_default_group(parent_group, group, frag); if (ret) goto err_out; @@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group, configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); spin_unlock(&configfs_dirent_lock); inode_unlock(d_inode(parent)); + put_fragment(frag); return 0; err_out: inode_unlock(d_inode(parent)); mutex_lock(&subsys->su_mutex); unlink_group(group); mutex_unlock(&subsys->su_mutex); + put_fragment(frag); return ret; } EXPORT_SYMBOL(configfs_register_group); @@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group) struct configfs_subsystem *subsys = group->cg_subsys; struct dentry *dentry = group->cg_item.ci_dentry; struct dentry *parent = group->cg_item.ci_parent->ci_dentry; + struct configfs_dirent *sd = dentry->d_fsdata; + struct configfs_fragment *frag = sd->s_frag; - mutex_lock(&subsys->su_mutex); - if (!group->cg_item.ci_parent->ci_group) { - /* - * The parent has already been unlinked and detached - * due to a rmdir. 
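The fragment threaded through mkdir/rmdir above enforces one invariant: attribute methods only run while the owning directory's fragment is alive. Callers take frag_sem for read and test frag_dead; removal takes it for write and sets frag_dead, which also waits out any call already in flight. A minimal sketch of the two halves (names from this patch, bodies simplified):

	static ssize_t guarded_show(struct configfs_fragment *frag,
				    struct configfs_buffer *buffer)
	{
		ssize_t count = -ENOENT;

		down_read(&frag->frag_sem);
		if (!frag->frag_dead)	/* rmdir has not claimed the dir */
			count = buffer->attr->show(buffer->item,
						   buffer->page);
		up_read(&frag->frag_sem);
		return count;
	}

	static void kill_fragment(struct configfs_fragment *frag)
	{
		down_write(&frag->frag_sem);	/* waits for readers above */
		frag->frag_dead = true;
		up_write(&frag->frag_sem);
	}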
- */ - goto unlink_group; - } - mutex_unlock(&subsys->su_mutex); + down_write(&frag->frag_sem); + frag->frag_dead = true; + up_write(&frag->frag_sem); inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); spin_lock(&configfs_dirent_lock); @@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group) dput(dentry); mutex_lock(&subsys->su_mutex); -unlink_group: unlink_group(group); mutex_unlock(&subsys->su_mutex); } @@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) struct dentry *dentry; struct dentry *root; struct configfs_dirent *sd; + struct configfs_fragment *frag; + + frag = new_fragment(); + if (!frag) + return -ENOMEM; root = configfs_pin_fs(); - if (IS_ERR(root)) + if (IS_ERR(root)) { + put_fragment(frag); return PTR_ERR(root); + } if (!group->cg_item.ci_name) group->cg_item.ci_name = group->cg_item.ci_namebuf; @@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) d_add(dentry, NULL); err = configfs_attach_group(sd->s_element, &group->cg_item, - dentry); + dentry, frag); if (err) { BUG_ON(d_inode(dentry)); d_drop(dentry); @@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) unlink_group(group); configfs_release_fs(); } + put_fragment(frag); return err; } @@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) struct config_group *group = &subsys->su_group; struct dentry *dentry = group->cg_item.ci_dentry; struct dentry *root = dentry->d_sb->s_root; + struct configfs_dirent *sd = dentry->d_fsdata; + struct configfs_fragment *frag = sd->s_frag; if (dentry->d_parent != root) { pr_err("Tried to unregister non-subsystem!\n"); return; } + down_write(&frag->frag_sem); + frag->frag_dead = true; + up_write(&frag->frag_sem); + inode_lock_nested(d_inode(root), I_MUTEX_PARENT); inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 61e4db4390a1..fb65b706cc0d 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -39,40 +39,44 @@ struct configfs_buffer { bool write_in_progress; char *bin_buffer; int bin_buffer_size; + int cb_max_size; + struct config_item *item; + struct module *owner; + union { + struct configfs_attribute *attr; + struct configfs_bin_attribute *bin_attr; + }; }; - -/** - * fill_read_buffer - allocate and fill buffer from item. - * @dentry: dentry pointer. - * @buffer: data buffer for file. - * - * Allocate @buffer->page, if it hasn't been already, then call the - * config_item's show() method to fill the buffer with this attribute's - * data. - * This is called only once, on the file's first read. 
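Fragment lifetime itself is plain reference counting: new_fragment() returns with a count of one, get_fragment() adds a reference for each dirent that shares the fragment, and the final put_fragment() frees it. The creation-side choreography used throughout this patch, reduced to a hypothetical wrapper:

	static int attach_with_fragment(struct configfs_dirent *sd)
	{
		struct configfs_fragment *frag = new_fragment(); /* count 1 */

		if (!frag)
			return -ENOMEM;
		sd->s_frag = get_fragment(frag);	/* count 2 */
		put_fragment(frag);			/* creator drops ref */
		return 0;	/* dirent's reference keeps frag alive */
	}

release_configfs_dirent() supplies the matching put_fragment() when the dirent itself goes away.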
- */ -static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer) +static inline struct configfs_fragment *to_frag(struct file *file) { - struct configfs_attribute * attr = to_attr(dentry); - struct config_item * item = to_item(dentry->d_parent); - int ret = 0; - ssize_t count; + struct configfs_dirent *sd = file->f_path.dentry->d_fsdata; + + return sd->s_frag; +} + +static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer) +{ + struct configfs_fragment *frag = to_frag(file); + ssize_t count = -ENOENT; if (!buffer->page) buffer->page = (char *) get_zeroed_page(GFP_KERNEL); if (!buffer->page) return -ENOMEM; - count = attr->show(item, buffer->page); + down_read(&frag->frag_sem); + if (!frag->frag_dead) + count = buffer->attr->show(buffer->item, buffer->page); + up_read(&frag->frag_sem); - BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE); - if (count >= 0) { - buffer->needs_read_fill = 0; - buffer->count = count; - } else - ret = count; - return ret; + if (count < 0) + return count; + if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE)) + return -EIO; + buffer->needs_read_fill = 0; + buffer->count = count; + return 0; } /** @@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf static ssize_t configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct configfs_buffer * buffer = file->private_data; + struct configfs_buffer *buffer = file->private_data; ssize_t retval = 0; mutex_lock(&buffer->mutex); if (buffer->needs_read_fill) { - if ((retval = fill_read_buffer(file->f_path.dentry,buffer))) + retval = fill_read_buffer(file, buffer); + if (retval) goto out; } pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n", @@ -138,10 +143,8 @@ static ssize_t configfs_read_bin_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { + struct configfs_fragment *frag = to_frag(file); struct configfs_buffer *buffer = file->private_data; - struct dentry *dentry = file->f_path.dentry; - struct config_item *item = to_item(dentry->d_parent); - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry); ssize_t retval = 0; ssize_t len = min_t(size_t, count, PAGE_SIZE); @@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf, if (buffer->needs_read_fill) { /* perform first read with buf == NULL to get extent */ - len = bin_attr->read(item, NULL, 0); + down_read(&frag->frag_sem); + if (!frag->frag_dead) + len = buffer->bin_attr->read(buffer->item, NULL, 0); + else + len = -ENOENT; + up_read(&frag->frag_sem); if (len <= 0) { retval = len; goto out; } /* do not exceed the maximum value */ - if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) { + if (buffer->cb_max_size && len > buffer->cb_max_size) { retval = -EFBIG; goto out; } @@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf, buffer->bin_buffer_size = len; /* perform second read to fill buffer */ - len = bin_attr->read(item, buffer->bin_buffer, len); + down_read(&frag->frag_sem); + if (!frag->frag_dead) + len = buffer->bin_attr->read(buffer->item, + buffer->bin_buffer, len); + else + len = -ENOENT; + up_read(&frag->frag_sem); if (len < 0) { retval = len; vfree(buffer->bin_buffer); @@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size return error ? -EFAULT : count; } - -/** - * flush_write_buffer - push buffer to config_item. - * @dentry: dentry to the attribute - * @buffer: data buffer for file. 
- * @count: number of bytes - * - * Get the correct pointers for the config_item and the attribute we're - * dealing with, then call the store() method for the attribute, - * passing the buffer that we acquired in fill_write_buffer(). - */ - static int -flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count) +flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count) { - struct configfs_attribute * attr = to_attr(dentry); - struct config_item * item = to_item(dentry->d_parent); + struct configfs_fragment *frag = to_frag(file); + int res = -ENOENT; - return attr->store(item, buffer->page, count); + down_read(&frag->frag_sem); + if (!frag->frag_dead) + res = buffer->attr->store(buffer->item, buffer->page, count); + up_read(&frag->frag_sem); + return res; } @@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size static ssize_t configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct configfs_buffer * buffer = file->private_data; + struct configfs_buffer *buffer = file->private_data; ssize_t len; mutex_lock(&buffer->mutex); len = fill_write_buffer(buffer, buf, count); if (len > 0) - len = flush_write_buffer(file->f_path.dentry, buffer, len); + len = flush_write_buffer(file, buffer, len); if (len > 0) *ppos += len; mutex_unlock(&buffer->mutex); @@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct configfs_buffer *buffer = file->private_data; - struct dentry *dentry = file->f_path.dentry; - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry); void *tbuf = NULL; ssize_t len; @@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf, /* buffer grows? 
*/ if (*ppos + count > buffer->bin_buffer_size) { - if (bin_attr->cb_max_size && - *ppos + count > bin_attr->cb_max_size) { + if (buffer->cb_max_size && + *ppos + count > buffer->cb_max_size) { len = -EFBIG; goto out; } @@ -349,31 +353,51 @@ configfs_write_bin_file(struct file *file, const char __user *buf, return len; } -static int check_perm(struct inode * inode, struct file * file, int type) +static int __configfs_open_file(struct inode *inode, struct file *file, int type) { - struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent); - struct configfs_attribute * attr = to_attr(file->f_path.dentry); - struct configfs_bin_attribute *bin_attr = NULL; - struct configfs_buffer * buffer; - struct configfs_item_operations * ops = NULL; - int error = 0; + struct dentry *dentry = file->f_path.dentry; + struct configfs_fragment *frag = to_frag(file); + struct configfs_attribute *attr; + struct configfs_buffer *buffer; + int error; - if (!item || !attr) - goto Einval; + error = -ENOMEM; + buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL); + if (!buffer) + goto out; - if (type & CONFIGFS_ITEM_BIN_ATTR) - bin_attr = to_bin_attr(file->f_path.dentry); + error = -ENOENT; + down_read(&frag->frag_sem); + if (unlikely(frag->frag_dead)) + goto out_free_buffer; - /* Grab the module reference for this attribute if we have one */ - if (!try_module_get(attr->ca_owner)) { - error = -ENODEV; - goto Done; + error = -EINVAL; + buffer->item = to_item(dentry->d_parent); + if (!buffer->item) + goto out_free_buffer; + + attr = to_attr(dentry); + if (!attr) + goto out_put_item; + + if (type & CONFIGFS_ITEM_BIN_ATTR) { + buffer->bin_attr = to_bin_attr(dentry); + buffer->cb_max_size = buffer->bin_attr->cb_max_size; + } else { + buffer->attr = attr; } - if (item->ci_type) - ops = item->ci_type->ct_item_ops; - else - goto Eaccess; + buffer->owner = attr->ca_owner; + /* Grab the module reference for this attribute if we have one */ + error = -ENODEV; + if (!try_module_get(buffer->owner)) + goto out_put_item; + + error = -EACCES; + if (!buffer->item->ci_type) + goto out_put_module; + + buffer->ops = buffer->item->ci_type->ct_item_ops; /* File needs write support. * The inode's perms must say it's ok, @@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type) */ if (file->f_mode & FMODE_WRITE) { if (!(inode->i_mode & S_IWUGO)) - goto Eaccess; - + goto out_put_module; if ((type & CONFIGFS_ITEM_ATTR) && !attr->store) - goto Eaccess; - - if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write) - goto Eaccess; + goto out_put_module; + if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write) + goto out_put_module; } /* File needs read support. @@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type) */ if (file->f_mode & FMODE_READ) { if (!(inode->i_mode & S_IRUGO)) - goto Eaccess; - + goto out_put_module; if ((type & CONFIGFS_ITEM_ATTR) && !attr->show) - goto Eaccess; - - if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read) - goto Eaccess; + goto out_put_module; + if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read) + goto out_put_module; } - /* No error? Great, allocate a buffer for the file, and store it - * it in file->private_data for easy access. 
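__configfs_open_file() above is a classic goto-unwind ladder: acquire resources in order, and on failure jump to the label that releases everything taken so far, in reverse. Skeleton of the idiom with stand-in resources (illustrative, not the function itself):

	struct res_a { int a; };
	struct res_b { int b; };

	static int open_skeleton(struct res_a **pa, struct res_b **pb)
	{
		struct res_a *a;
		struct res_b *b;
		int error = -ENOMEM;

		a = kzalloc(sizeof(*a), GFP_KERNEL);
		if (!a)
			goto out;
		b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			goto out_free_a;

		*pa = a;
		*pb = b;
		return 0;

	out_free_a:
		kfree(a);
	out:
		return error;
	}

In the real function the rungs are the buffer allocation, the item reference, and the module reference, unwound via out_free_buffer, out_put_item and out_put_module respectively.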
- */ - buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL); - if (!buffer) { - error = -ENOMEM; - goto Enomem; - } mutex_init(&buffer->mutex); buffer->needs_read_fill = 1; buffer->read_in_progress = false; buffer->write_in_progress = false; - buffer->ops = ops; file->private_data = buffer; - goto Done; + up_read(&frag->frag_sem); + return 0; - Einval: - error = -EINVAL; - goto Done; - Eaccess: - error = -EACCES; - Enomem: - module_put(attr->ca_owner); - Done: - if (error && item) - config_item_put(item); +out_put_module: + module_put(buffer->owner); +out_put_item: + config_item_put(buffer->item); +out_free_buffer: + up_read(&frag->frag_sem); + kfree(buffer); +out: return error; } static int configfs_release(struct inode *inode, struct file *filp) { - struct config_item * item = to_item(filp->f_path.dentry->d_parent); - struct configfs_attribute * attr = to_attr(filp->f_path.dentry); - struct module * owner = attr->ca_owner; - struct configfs_buffer * buffer = filp->private_data; + struct configfs_buffer *buffer = filp->private_data; - if (item) - config_item_put(item); - /* After this point, attr should not be accessed. */ - module_put(owner); - - if (buffer) { - if (buffer->page) - free_page((unsigned long)buffer->page); - mutex_destroy(&buffer->mutex); - kfree(buffer); - } + module_put(buffer->owner); + if (buffer->page) + free_page((unsigned long)buffer->page); + mutex_destroy(&buffer->mutex); + kfree(buffer); return 0; } static int configfs_open_file(struct inode *inode, struct file *filp) { - return check_perm(inode, filp, CONFIGFS_ITEM_ATTR); + return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR); } static int configfs_open_bin_file(struct inode *inode, struct file *filp) { - return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR); + return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR); } -static int configfs_release_bin_file(struct inode *inode, struct file *filp) +static int configfs_release_bin_file(struct inode *inode, struct file *file) { - struct configfs_buffer *buffer = filp->private_data; - struct dentry *dentry = filp->f_path.dentry; - struct config_item *item = to_item(dentry->d_parent); - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry); - ssize_t len = 0; - int ret; + struct configfs_buffer *buffer = file->private_data; buffer->read_in_progress = false; if (buffer->write_in_progress) { + struct configfs_fragment *frag = to_frag(file); buffer->write_in_progress = false; - len = bin_attr->write(item, buffer->bin_buffer, - buffer->bin_buffer_size); - + down_read(&frag->frag_sem); + if (!frag->frag_dead) { + /* result of ->release() is ignored */ + buffer->bin_attr->write(buffer->item, + buffer->bin_buffer, + buffer->bin_buffer_size); + } + up_read(&frag->frag_sem); /* vfree on NULL is safe */ vfree(buffer->bin_buffer); buffer->bin_buffer = NULL; @@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp) buffer->needs_read_fill = 1; } - ret = configfs_release(inode, filp); - if (len < 0) - return len; - return ret; + configfs_release(inode, file); + return 0; } @@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL); error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, - CONFIGFS_ITEM_ATTR); + CONFIGFS_ITEM_ATTR, parent_sd->s_frag); inode_unlock(d_inode(dir)); return error; @@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item, inode_lock_nested(dir->d_inode, 
I_MUTEX_NORMAL); error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode, - CONFIGFS_ITEM_BIN_ATTR); + CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag); inode_unlock(dir->d_inode); return error; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index c764cfe456e5..2a03bfeec10a 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1403,11 +1403,12 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) return 0; - /* No fileid? Just exit */ - if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) - return 0; + if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) { + /* Only a mounted-on-fileid? Just exit */ + if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) + return 0; /* Has the inode gone and changed behind our back? */ - if (nfsi->fileid != fattr->fileid) { + } else if (nfsi->fileid != fattr->fileid) { /* Is this perhaps the mounted-on fileid? */ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) && nfsi->fileid == fattr->mounted_on_fileid) @@ -1807,11 +1808,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfs_display_fhandle_hash(NFS_FH(inode)), atomic_read(&inode->i_count), fattr->valid); - /* No fileid? Just exit */ - if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) - return 0; + if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) { + /* Only a mounted-on-fileid? Just exit */ + if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) + return 0; /* Has the inode gone and changed behind our back? */ - if (nfsi->fileid != fattr->fileid) { + } else if (nfsi->fileid != fattr->fileid) { /* Is this perhaps the mounted-on fileid? */ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) && nfsi->fileid == fattr->mounted_on_fileid) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index f0fd5636fddb..5e88e7e33abe 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, long ______r; \ static struct ftrace_likely_data \ __aligned(4) \ - __section("_ftrace_annotated_branch") \ + __section(_ftrace_annotated_branch) \ ______f = { \ .data.func = __func__, \ .data.file = __FILE__, \ @@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define __trace_if_value(cond) ({ \ static struct ftrace_branch_data \ __aligned(4) \ - __section("_ftrace_branch") \ + __section(_ftrace_branch) \ __if_trace = { \ .func = __func__, \ .file = __FILE__, \ @@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, ".popsection\n\t" /* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table") +#define __annotate_jump_table __section(.rodata..c_jump_table) #else #define annotate_reachable() @@ -298,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr) * visible to the compiler. 
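The compiler.h hunks deserve a note: in this tree the __section() helper stringizes its argument itself, so callers must pass a bare token; passing an already-quoted string would yield a doubly-quoted section name. Assumed definition (from compiler_attributes.h of this kernel generation) and its effect:

	/* assumed helper -- stringizes its argument: */
	#define __section(S) __attribute__((__section__(#S)))

	/* so this ... */
	static int marker __section(.data.example);
	/* ... expands to __attribute__((__section__(".data.example"))) */

That is why these four hunks revert the quoted names ("_ftrace_annotated_branch" and friends) back to bare tokens.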
*/ #define __ADDRESSABLE(sym) \ - static void * __section(".discard.addressable") __used \ + static void * __section(.discard.addressable) __used \ __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; /** diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h index ceabb01a6a7d..1ecb6b45812c 100644 --- a/include/linux/input/elan-i2c-ids.h +++ b/include/linux/input/elan-i2c-ids.h @@ -48,7 +48,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0618", 0 }, { "ELAN0619", 0 }, { "ELAN061A", 0 }, - { "ELAN061B", 0 }, +/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */ { "ELAN061C", 0 }, { "ELAN061D", 0 }, { "ELAN061E", 0 }, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index f2ae8a006ff8..4fc6454f7ebb 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -346,7 +346,6 @@ enum { #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) #define QI_EIOTLB_AM(am) (((u64)am)) #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) @@ -378,8 +377,6 @@ enum { #define QI_RESP_INVALID 0x1 #define QI_RESP_FAILURE 0xf -#define QI_GRAN_ALL_ALL 0 -#define QI_GRAN_NONG_ALL 1 #define QI_GRAN_NONG_PASID 2 #define QI_GRAN_PSI_PASID 3 diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 1e5d86ebdaeb..52bc8e487ef7 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -11,6 +11,7 @@ struct fixed_phy_status { }; struct device_node; +struct gpio_desc; #if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 88145da7d140..f7c561c4dcdd 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1402,4 +1402,23 @@ static inline unsigned int ksys_personality(unsigned int personality) return old; } +/* for __ARCH_WANT_SYS_IPC */ +long ksys_semtimedop(int semid, struct sembuf __user *tsops, + unsigned int nsops, + const struct __kernel_timespec __user *timeout); +long ksys_semget(key_t key, int nsems, int semflg); +long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); +long ksys_msgget(key_t key, int msgflg); +long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); +long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, + long msgtyp, int msgflg); +long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, + int msgflg); +long ksys_shmget(key_t key, size_t size, int shmflg); +long ksys_shmdt(char __user *shmaddr); +long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); +long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, + unsigned int nsops, + const struct old_timespec32 __user *timeout); + #endif diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 4c81846ccce8..ab1ca9e238d2 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -513,7 +513,7 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, struct netlink_callback *cb); int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh, - unsigned char *flags, bool skip_oif); + u8 rt_family, unsigned char *flags, bool skip_oif); int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh, - int nh_weight); + int nh_weight, u8 rt_family); #endif /* _NET_FIB_H */ diff --git a/include/net/nexthop.h 
b/include/net/nexthop.h index 95f766c31c90..331ebbc94fe7 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -161,7 +161,8 @@ struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) } static inline -int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh) +int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh, + u8 rt_family) { struct nh_group *nhg = rtnl_dereference(nh->nh_grp); int i; @@ -172,7 +173,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh) struct fib_nh_common *nhc = &nhi->fib_nhc; int weight = nhg->nh_entries[i].weight; - if (fib_add_nexthop(skb, nhc, weight) < 0) + if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0) return -EMSGSIZE; } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index b22db30c3d88..aa08a7a5f6ac 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -983,7 +983,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); struct xfrm_if_parms { - char name[IFNAMSIZ]; /* name of XFRM device */ int link; /* ifindex of underlying L2 interface */ u32 if_id; /* interface identifyer */ }; @@ -991,7 +990,6 @@ struct xfrm_if_parms { struct xfrm_if { struct xfrm_if __rcu *next; /* next interface in list */ struct net_device *dev; /* virtual device associated with interface */ - struct net_device *phydev; /* physical device */ struct net *net; /* netns for packet i/o */ struct xfrm_if_parms p; /* interface parms */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 1be0e798e362..1fc8faa6e973 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget) __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl) #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_semtimedop 192 -__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32) +__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop) #endif #define __NR_semop 193 __SYSCALL(__NR_semop, sys_semop) diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h index 4941628a4fb9..5ec88e7548a9 100644 --- a/include/uapi/linux/isdn/capicmd.h +++ b/include/uapi/linux/isdn/capicmd.h @@ -16,6 +16,7 @@ #define CAPI_MSG_BASELEN 8 #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2) #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2) +#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4) /*----- CAPI commands -----*/ #define CAPI_ALERT 0x01 diff --git a/ipc/util.h b/ipc/util.h index 0fcf8e719b76..5766c61aed0e 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -276,29 +276,7 @@ static inline int compat_ipc_parse_version(int *cmd) *cmd &= ~IPC_64; return version; } -#endif -/* for __ARCH_WANT_SYS_IPC */ -long ksys_semtimedop(int semid, struct sembuf __user *tsops, - unsigned int nsops, - const struct __kernel_timespec __user *timeout); -long ksys_semget(key_t key, int nsems, int semflg); -long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); -long ksys_msgget(key_t key, int msgflg); -long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); -long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg); -long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, - int msgflg); -long ksys_shmget(key_t key, size_t size, int shmflg); -long ksys_shmdt(char __user *shmaddr); -long ksys_old_shmctl(int shmid, int 
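The unistd.h change above switches __NR_semtimedop from __SC_COMP to __SC_3264: the native entry on a generic 32-bit kernel must be the time32 variant, and sys_semtimedop (64-bit timespec) belongs only in 64-bit tables. A paraphrased sketch of the selector, under the assumption that __SC_3264 follows the usual asm-generic pattern (names suffixed _SKETCH to flag that this is not copied from the tree):

/* Paraphrase of the asm-generic selector macros (sketch, not verbatim). */
#define __BITS_PER_LONG_SKETCH 64
#define __SYSCALL_SKETCH(nr, entry)   /* arch maps table slot nr -> entry */

#if __BITS_PER_LONG_SKETCH == 32
#define __SC_3264_SKETCH(nr, e32, e64) __SYSCALL_SKETCH(nr, e32)
#else
#define __SC_3264_SKETCH(nr, e32, e64) __SYSCALL_SKETCH(nr, e64)
#endif

/* Native 32-bit kernels thus get sys_semtimedop_time32 and 64-bit
 * kernels get sys_semtimedop, whereas __SC_COMP always wired the
 * native table to its second argument and only compat tables to the
 * third. */
__SC_3264_SKETCH(192, sys_semtimedop_time32, sys_semtimedop)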
cmd, struct shmid_ds __user *buf); - -/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */ -long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned int nsops, - const struct old_timespec32 __user *timeout); -#ifdef CONFIG_COMPAT long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg); long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr); long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, @@ -306,6 +284,7 @@ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, int msgflg); long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr); -#endif /* CONFIG_COMPAT */ + +#endif #endif diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3fb50757e812..315798037d6c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1772,16 +1772,21 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, bitmap_from_u64(mask, stack_mask); for_each_set_bit(i, mask, 64) { if (i >= func->allocated_stack / BPF_REG_SIZE) { - /* This can happen if backtracking - * is propagating stack precision where - * caller has larger stack frame - * than callee, but backtrack_insn() should - * have returned -ENOTSUPP. + /* the sequence of instructions: + * 2: (bf) r3 = r10 + * 3: (7b) *(u64 *)(r3 -8) = r0 + * 4: (79) r4 = *(u64 *)(r10 -8) + * doesn't contain jmps. It's backtracked + * as a single block. + * During backtracking insn 3 is not recognized as + * stack access, so at the end of backtracking + * stack slot fp-8 is still marked in stack_mask. + * However the parent state may not have accessed + * fp-8 and it's "unallocated" stack space. + * In such case fallback to conservative. */ - verbose(env, "BUG spi %d stack_size %d\n", - i, func->allocated_stack); - WARN_ONCE(1, "verifier backtracking bug"); - return -EFAULT; + mark_all_scalars_precise(env, st); + return 0; } if (func->stack[i].slot_type[0] != STACK_SPILL) { diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 753afbca549f..8be1da1ebd9a 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5255,8 +5255,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent) * if the parent has to be frozen, the child has too. */ cgrp->freezer.e_freeze = parent->freezer.e_freeze; - if (cgrp->freezer.e_freeze) + if (cgrp->freezer.e_freeze) { + /* + * Set the CGRP_FREEZE flag, so when a process will be + * attached to the child cgroup, it will become frozen. + * At this point the new cgroup is unpopulated, so we can + * consider it frozen immediately. 
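The verifier hunk above turns a hard "backtracking bug" failure into graceful degradation: if precision propagation meets a stack slot the parent state never allocated, everything is conservatively marked precise, which is always safe and only costs state-pruning efficiency. A hypothetical, self-contained illustration of that fallback shape (all names invented):

#include <stdbool.h>
#include <stddef.h>

struct slot_sketch { bool precise; };

/* Safe over-approximation: precision lost, correctness kept. */
static void mark_all_precise(struct slot_sketch *slots, size_t n)
{
	for (size_t i = 0; i < n; i++)
		slots[i].precise = true;
}

static int propagate_precision(struct slot_sketch *slots, size_t allocated,
			       size_t wanted)
{
	if (wanted >= allocated) {
		/* The parent state never allocated this slot; instead of
		 * declaring a verifier bug, fall back to the conservative
		 * answer and keep going. */
		mark_all_precise(slots, allocated);
		return 0;
	}
	slots[wanted].precise = true;
	return 0;
}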
+ */ + set_bit(CGRP_FREEZE, &cgrp->flags); set_bit(CGRP_FROZEN, &cgrp->flags); + } spin_lock_irq(&css_set_lock); for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index c5cd852fe86b..3cc8416ec844 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -413,7 +413,7 @@ static int hw_breakpoint_parse(struct perf_event *bp, int register_perf_hw_breakpoint(struct perf_event *bp) { - struct arch_hw_breakpoint hw; + struct arch_hw_breakpoint hw = { }; int err; err = reserve_bp_slot(bp); @@ -461,7 +461,7 @@ int modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, bool check) { - struct arch_hw_breakpoint hw; + struct arch_hw_breakpoint hw = { }; int err; err = hw_breakpoint_parse(bp, attr, &hw); diff --git a/kernel/fork.c b/kernel/fork.c index 2852d0e76ea3..541fd805fb88 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2338,6 +2338,8 @@ struct mm_struct *copy_init_mm(void) * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. + * + * args->exit_signal is expected to be checked for sanity by the caller. */ long _do_fork(struct kernel_clone_args *args) { @@ -2562,6 +2564,14 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, if (copy_from_user(&args, uargs, size)) return -EFAULT; + /* + * Verify that higher 32bits of exit_signal are unset and that + * it is a valid signal + */ + if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || + !valid_signal(args.exit_signal))) + return -EINVAL; + *kargs = (struct kernel_clone_args){ .flags = args.flags, .pidfd = u64_to_user_ptr(args.pidfd), diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 95414ad3506a..98c04ca5fa43 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg) irq = find_first_bit(irqs_resend, nr_irqs); clear_bit(irq, irqs_resend); desc = irq_to_desc(irq); + if (!desc) + continue; local_irq_disable(); desc->handle_irq(desc); local_irq_enable(); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 010d578118d6..df9f1fe5689b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5105,37 +5105,40 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) return retval; } -static int sched_read_attr(struct sched_attr __user *uattr, - struct sched_attr *attr, - unsigned int usize) +/* + * Copy the kernel size attribute structure (which might be larger + * than what user-space knows about) to user-space. + * + * Note that all cases are valid: user-space buffer can be larger or + * smaller than the kernel-space buffer. The usual case is that both + * have the same size. + */ +static int +sched_attr_copy_to_user(struct sched_attr __user *uattr, + struct sched_attr *kattr, + unsigned int usize) { - int ret; + unsigned int ksize = sizeof(*kattr); if (!access_ok(uattr, usize)) return -EFAULT; /* - * If we're handed a smaller struct than we know of, - * ensure all the unknown bits are 0 - i.e. old - * user-space does not get uncomplete information. + * sched_getattr() ABI forwards and backwards compatibility: + * + * If usize == ksize then we just copy everything to user-space and all is good. + * + * If usize < ksize then we only copy as much as user-space has space for, + * this keeps ABI compatibility as well. We skip the rest. 
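The copy_clone_args_from_user() check above rejects bad clone3 exit_signal values early: the legacy clone ABI carries the signal in the low CSIGNAL bits only, and valid_signal() bounds the number itself. A userspace sketch of the same validation, with an illustrative signal count:

#include <stdbool.h>
#include <stdint.h>

#define CSIGNAL_BITS 0x000000ffULL  /* low bits that may carry a signal */
#define NSIG_SKETCH  64             /* illustrative bound, not _NSIG */

static bool exit_signal_ok(uint64_t exit_signal)
{
	/* The higher bits (including the upper 32) must be unset... */
	if (exit_signal & ~CSIGNAL_BITS)
		return false;
	/* ...and the number must be one valid_signal() would accept
	 * (0, meaning "no signal", is fine). */
	return exit_signal <= NSIG_SKETCH;
}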
+ * + * If usize > ksize then user-space is using a newer version of the ABI, + * which part the kernel doesn't know about. Just ignore it - tooling can + * detect the kernel's knowledge of attributes from the attr->size value + * which is set to ksize in this case. */ - if (usize < sizeof(*attr)) { - unsigned char *addr; - unsigned char *end; + kattr->size = min(usize, ksize); - addr = (void *)attr + usize; - end = (void *)attr + sizeof(*attr); - - for (; addr < end; addr++) { - if (*addr) - return -EFBIG; - } - - attr->size = usize; - } - - ret = copy_to_user(uattr, attr, attr->size); - if (ret) + if (copy_to_user(uattr, kattr, kattr->size)) return -EFAULT; return 0; @@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr, * sys_sched_getattr - similar to sched_getparam, but with sched_attr * @pid: the pid in question. * @uattr: structure containing the extended parameters. - * @size: sizeof(attr) for fwd/bwd comp. + * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility. * @flags: for future extension. */ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, - unsigned int, size, unsigned int, flags) + unsigned int, usize, unsigned int, flags) { - struct sched_attr attr = { - .size = sizeof(struct sched_attr), - }; + struct sched_attr kattr = { }; struct task_struct *p; int retval; - if (!uattr || pid < 0 || size > PAGE_SIZE || - size < SCHED_ATTR_SIZE_VER0 || flags) + if (!uattr || pid < 0 || usize > PAGE_SIZE || + usize < SCHED_ATTR_SIZE_VER0 || flags) return -EINVAL; rcu_read_lock(); @@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, if (retval) goto out_unlock; - attr.sched_policy = p->policy; + kattr.sched_policy = p->policy; if (p->sched_reset_on_fork) - attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; + kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; if (task_has_dl_policy(p)) - __getparam_dl(p, &attr); + __getparam_dl(p, &kattr); else if (task_has_rt_policy(p)) - attr.sched_priority = p->rt_priority; + kattr.sched_priority = p->rt_priority; else - attr.sched_nice = task_nice(p); + kattr.sched_nice = task_nice(p); #ifdef CONFIG_UCLAMP_TASK - attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; - attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; + kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; + kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; #endif rcu_read_unlock(); - retval = sched_read_attr(uattr, &attr, size); - return retval; + return sched_attr_copy_to_user(uattr, &kattr, usize); out_unlock: rcu_read_unlock(); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bc9cfeaac8bd..500f5db0de0b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) if (likely(cfs_rq->runtime_remaining > 0)) return; + if (cfs_rq->throttled) + return; /* * if we're unable to extend our runtime we resched so that the active * hierarchy can be throttled @@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, if (!cfs_rq_throttled(cfs_rq)) goto next; + /* By the above check, this should never be true */ + SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); + runtime = -cfs_rq->runtime_remaining + 1; if (runtime > remaining) runtime = remaining; diff --git a/lib/Kconfig b/lib/Kconfig index f33d66fc0e86..4e6b1c3e4c98 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -631,6 +631,9 @@ config SBITMAP config PARMAN tristate 
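sched_attr_copy_to_user() above replaces the old reject-on-mismatch logic with a clamp: copy min(usize, ksize) bytes and report that length back through attr->size, so both older and newer user-space keep working. A userspace analogue of the clamp (struct layout is a stand-in):

#include <stdint.h>
#include <string.h>

struct kattr_sketch {
	uint32_t size;      /* reported back so tooling can detect ksize */
	uint64_t fields[6];
};

static void copy_attr_out(void *ubuf, unsigned int usize,
			  struct kattr_sketch *k)
{
	unsigned int ksize = sizeof(*k);

	/* usize < ksize: old user-space gets a truncated struct.
	 * usize > ksize: new user-space learns our size from k->size. */
	k->size = usize < ksize ? usize : ksize;
	memcpy(ubuf, k, k->size);
}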
"parman" if COMPILE_TEST +config OBJAGG + tristate "objagg" if COMPILE_TEST + config STRING_SELFTEST tristate "Test string functions" @@ -653,6 +656,3 @@ config GENERIC_LIB_CMPDI2 config GENERIC_LIB_UCMPDI2 bool - -config OBJAGG - tristate "objagg" if COMPILE_TEST diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 798275a51887..26de020aae7b 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue); struct page *balloon_page_alloc(void) { struct page *page = alloc_page(balloon_mapping_gfp_mask() | - __GFP_NOMEMALLOC | __GFP_NORETRY); + __GFP_NOMEMALLOC | __GFP_NORETRY | + __GFP_NOWARN); return page; } EXPORT_SYMBOL_GPL(balloon_page_alloc); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cdb00c2ef242..c1d3a303d97f 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5660,11 +5660,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_UNKNOWN_CONN_ID); - if (min < hcon->le_conn_min_interval || - max > hcon->le_conn_max_interval) - return send_conn_param_neg_reply(hdev, handle, - HCI_ERROR_INVALID_LL_PARAMS); - if (hci_check_conn_params(min, max, latency, timeout)) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_INVALID_LL_PARAMS); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index dfc1edb168b7..da7fdbdf9c41 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -5305,14 +5305,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, memset(&rsp, 0, sizeof(rsp)); - if (min < hcon->le_conn_min_interval || - max > hcon->le_conn_max_interval) { - BT_DBG("requested connection interval exceeds current bounds."); - err = -EINVAL; - } else { - err = hci_check_conn_params(min, max, latency, to_multiplier); - } - + err = hci_check_conn_params(min, max, latency, to_multiplier); if (err) rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 44594635a972..da5ed4cf9233 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -466,7 +466,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb, struct nlmsghdr *nlh; struct nlattr *nest; - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0); if (!nlh) return -EMSGSIZE; diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index d3f9592f4ff8..af7800103e51 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -496,6 +496,10 @@ static unsigned int br_nf_pre_routing(void *priv, if (!brnet->call_ip6tables && !br_opt_get(br, BROPT_NF_CALL_IP6TABLES)) return NF_ACCEPT; + if (!ipv6_mod_enabled()) { + pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported."); + return NF_DROP; + } nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(priv, skb, state); diff --git a/net/core/dev.c b/net/core/dev.c index b1afafee3e2a..a9775d676285 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8807,6 +8807,8 @@ int register_netdevice(struct net_device *dev) ret = notifier_to_errno(ret); if (ret) { rollback_registered(dev); + rcu_barrier(); + dev->reg_state = NETREG_UNREGISTERED; } /* diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2b40b5a9425b..f12e8a050edb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3670,6 +3670,25 @@ struct sk_buff 
*skb_segment(struct sk_buff *head_skb, int pos; int dummy; + if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && + (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { + /* gso_size is untrusted, and we have a frag_list with a linear + * non head_frag head. + * + * (we assume checking the first list_skb member suffices; + * i.e if either of the list_skb members have non head_frag + * head, then the first one has too). + * + * If head_skb's headlen does not fit requested gso_size, it + * means that the frag_list members do NOT terminate on exact + * gso_size boundaries. Hence we cannot perform skb_frag_t page + * sharing. Therefore we must fallback to copying the frag_list + * skbs; we do so by disabling SG. + */ + if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) + features &= ~NETIF_F_SG; + } + __skb_push(head_skb, doffset); proto = skb_network_protocol(head_skb, &dummy); if (unlikely(!proto)) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 01998860afaa..eb114ee419b6 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -656,6 +656,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key, struct sock *sk, u64 flags) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct inet_connection_sock *icsk = inet_csk(sk); u32 key_size = map->key_size, hash; struct bpf_htab_elem *elem, *elem_new; struct bpf_htab_bucket *bucket; @@ -666,6 +667,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key, WARN_ON_ONCE(!rcu_read_lock_held()); if (unlikely(flags > BPF_EXIST)) return -EINVAL; + if (unlikely(icsk->icsk_ulp_data)) + return -EINVAL; link = sk_psock_init_link(); if (!link) diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 2db089e10ba0..0913a090b2bf 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1582,7 +1582,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, } int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc, - unsigned char *flags, bool skip_oif) + u8 rt_family, unsigned char *flags, bool skip_oif) { if (nhc->nhc_flags & RTNH_F_DEAD) *flags |= RTNH_F_DEAD; @@ -1613,7 +1613,7 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc, /* if gateway family does not match nexthop family * gateway is encoded as RTA_VIA */ - if (nhc->nhc_gw_family != nhc->nhc_family) { + if (rt_family != nhc->nhc_gw_family) { int alen = sizeof(struct in6_addr); struct nlattr *nla; struct rtvia *via; @@ -1654,7 +1654,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info); #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6) int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc, - int nh_weight) + int nh_weight, u8 rt_family) { const struct net_device *dev = nhc->nhc_dev; struct rtnexthop *rtnh; @@ -1667,7 +1667,7 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc, rtnh->rtnh_hops = nh_weight - 1; rtnh->rtnh_ifindex = dev ? 
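The skb_segment() guard at the top of this hunk disables scatter-gather when gso_size is untrusted (SKB_GSO_DODGY) and the frag_list head is linear but does not match the MSS, since page-sharing segmentation assumes every frag_list member ends exactly on a gso_size boundary. The decision reduces to a small predicate; a sketch with illustrative names (the sentinel value mirrors the kernel's GSO_BY_FRAGS):

#include <stdbool.h>

#define GSO_BY_FRAGS_SKETCH 0xffff  /* "segment on frag boundaries" mss */

/* Must we fall back to copying (clear NETIF_F_SG)?  A head length that
 * matches the mss implies the frag_list terminates on exact gso_size
 * boundaries, which page sharing requires. */
static bool must_disable_sg(bool dodgy_gso, bool linear_list_head,
			    unsigned int mss, unsigned int head_len)
{
	if (!dodgy_gso || !linear_list_head)
		return false;
	return mss != GSO_BY_FRAGS_SKETCH && mss != head_len;
}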
dev->ifindex : 0; - if (fib_nexthop_info(skb, nhc, &flags, true) < 0) + if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0) goto nla_put_failure; rtnh->rtnh_flags = flags; @@ -1693,13 +1693,14 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi) goto nla_put_failure; if (unlikely(fi->nh)) { - if (nexthop_mpath_fill_node(skb, fi->nh) < 0) + if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0) goto nla_put_failure; goto mp_end; } for_nexthops(fi) { - if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0) + if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight, + AF_INET) < 0) goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID if (nh->nh_tclassid && @@ -1775,7 +1776,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, const struct fib_nh_common *nhc = fib_info_nhc(fi, 0); unsigned char flags = 0; - if (fib_nexthop_info(skb, nhc, &flags, false) < 0) + if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0) goto nla_put_failure; rtm->rtm_flags = flags; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 706cbb3b2986..7e94223fdb2b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb) static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; } static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 87d2d8c1db7c..98ac32b49d8c 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net) return 0; } -static void __net_init ping_v6_proc_exit_net(struct net *net) +static void __net_exit ping_v6_proc_exit_net(struct net *net) { remove_proc_entry("icmp6", net->proc_net); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 874641d4d2a1..a63ff85fe141 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4386,13 +4386,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net, struct fib6_config cfg = { .fc_table = l3mdev_fib_table(idev->dev) ? 
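The rt_family parameter threaded through fib_nexthop_info()/fib_add_nexthop() above fixes the RTA_VIA decision: with shareable nexthop objects, an IPv4 route may legitimately carry an IPv6 gateway, so the dump must compare the gateway family against the route's family, not against the nexthop's own. A minimal sketch of the corrected test:

#include <stdbool.h>

/* RTA_GATEWAY is unambiguous only when the gateway's family matches
 * the *route's* family; any mismatch must be encoded as the
 * family-tagged RTA_VIA attribute. */
static bool gateway_needs_via(unsigned char rt_family,
			      unsigned char gw_family)
{
	return rt_family != gw_family;  /* was nhc_family vs gw_family */
}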
: RT6_TABLE_LOCAL, .fc_ifindex = idev->dev->ifindex, - .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP, + .fc_flags = RTF_UP | RTF_NONEXTHOP, .fc_dst = *addr, .fc_dst_len = 128, .fc_protocol = RTPROT_KERNEL, .fc_nlinfo.nl_net = net, .fc_ignore_dev_down = true, }; + struct fib6_info *f6i; if (anycast) { cfg.fc_type = RTN_ANYCAST; @@ -4402,7 +4403,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net, cfg.fc_flags |= RTF_LOCAL; } - return ip6_route_info_create(&cfg, gfp_flags, NULL); + f6i = ip6_route_info_create(&cfg, gfp_flags, NULL); + if (!IS_ERR(f6i)) + f6i->dst_nocount = true; + return f6i; } /* remove deleted ip from prefsrc entries */ @@ -5323,11 +5327,11 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh, if (nexthop_is_multipath(nh)) { struct nlattr *mp; - mp = nla_nest_start(skb, RTA_MULTIPATH); + mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); if (!mp) goto nla_put_failure; - if (nexthop_mpath_fill_node(skb, nh)) + if (nexthop_mpath_fill_node(skb, nh, AF_INET6)) goto nla_put_failure; nla_nest_end(skb, mp); @@ -5335,7 +5339,7 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh, struct fib6_nh *fib6_nh; fib6_nh = nexthop_fib6_nh(nh); - if (fib_nexthop_info(skb, &fib6_nh->nh_common, + if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6, flags, false) < 0) goto nla_put_failure; } @@ -5464,13 +5468,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common, - rt->fib6_nh->fib_nh_weight) < 0) + rt->fib6_nh->fib_nh_weight, AF_INET6) < 0) goto nla_put_failure; list_for_each_entry_safe(sibling, next_sibling, &rt->fib6_siblings, fib6_siblings) { if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, - sibling->fib6_nh->fib_nh_weight) < 0) + sibling->fib6_nh->fib_nh_weight, + AF_INET6) < 0) goto nla_put_failure; } @@ -5487,7 +5492,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, rtm->rtm_flags |= nh_flags; } else { - if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, + if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6, &nh_flags, false) < 0) goto nla_put_failure; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7c6edb7c5f10..70739e746c13 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1532,7 +1532,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, struct sta_info *sta; struct ieee80211_sub_if_data *sdata; int err; - int layer2_update; if (params->vlan) { sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); @@ -1576,18 +1575,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, test_sta_flag(sta, WLAN_STA_ASSOC)) rate_control_rate_init(sta); - layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_AP; - err = sta_info_insert_rcu(sta); if (err) { rcu_read_unlock(); return err; } - if (layer2_update) - cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); - rcu_read_unlock(); return 0; @@ -1685,10 +1678,11 @@ static int ieee80211_change_station(struct wiphy *wiphy, sta->sdata = vlansdata; ieee80211_check_fast_xmit(sta); - if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { ieee80211_vif_inc_num_mcast(sta->sdata); - - cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); + } } err = sta_apply_parameters(local, sta, params); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c 
index df553070206c..bd11fef2139f 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1979,6 +1979,10 @@ int sta_info_move_state(struct sta_info *sta, ieee80211_check_fast_xmit(sta); ieee80211_check_fast_rx(sta); } + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sta->sdata->vif.type == NL80211_IFTYPE_AP) + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); break; default: break; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 6aa01eb6fe99..e2d13cd18875 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -553,10 +553,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, goto nla_put_failure; if (ctnetlink_dump_status(skb, ct) < 0 || - ctnetlink_dump_timeout(skb, ct) < 0 || ctnetlink_dump_acct(skb, ct, type) < 0 || ctnetlink_dump_timestamp(skb, ct) < 0 || - ctnetlink_dump_protoinfo(skb, ct) < 0 || ctnetlink_dump_helpinfo(skb, ct) < 0 || ctnetlink_dump_mark(skb, ct) < 0 || ctnetlink_dump_secctx(skb, ct) < 0 || @@ -568,6 +566,11 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, ctnetlink_dump_ct_synproxy(skb, ct) < 0) goto nla_put_failure; + if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) && + (ctnetlink_dump_timeout(skb, ct) < 0 || + ctnetlink_dump_protoinfo(skb, ct) < 0)) + goto nla_put_failure; + nlmsg_end(skb, nlh); return skb->len; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 09310a1bd91f..132f5228b431 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -218,7 +218,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) return err; } - flow->timeout = (u32)jiffies; + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; return 0; } EXPORT_SYMBOL_GPL(flow_offload_add); diff --git a/net/netfilter/nft_fib_netdev.c b/net/netfilter/nft_fib_netdev.c index 2cf3f32fe6d2..a2e726ae7f07 100644 --- a/net/netfilter/nft_fib_netdev.c +++ b/net/netfilter/nft_fib_netdev.c @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -34,6 +35,8 @@ static void nft_fib_netdev_eval(const struct nft_expr *expr, } break; case ETH_P_IPV6: + if (!ipv6_mod_enabled()) + break; switch (priv->result) { case NFT_FIB_RESULT_OIF: case NFT_FIB_RESULT_OIFNAME: diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index d7f3776dfd71..637ce3e8c575 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr, return; } - /* So that subsequent socket matching not to require other lookups. */ - skb->sk = sk; - switch(priv->key) { case NFT_SOCKET_TRANSPARENT: nft_reg_store8(dest, inet_sk_transparent(sk)); @@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr, WARN_ON(1); regs->verdict.code = NFT_BREAK; } + + if (sk != skb->sk) + sock_gen_put(sk); } static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = { diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c index ccff1e544c21..e35869e81766 100644 --- a/net/qrtr/tun.c +++ b/net/qrtr/tun.c @@ -84,11 +84,14 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from) if (!kbuf) return -ENOMEM; - if (!copy_from_iter_full(kbuf, len, from)) + if (!copy_from_iter_full(kbuf, len, from)) { + kfree(kbuf); return -EFAULT; + } ret = qrtr_endpoint_post(&tun->ep, kbuf, len); + kfree(kbuf); return ret < 0 ? 
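The nft_socket change above stops caching the looked-up socket in skb->sk and instead drops the lookup reference on the way out: the lookup helpers hand back their own reference whenever the socket was not already attached to the skb, and stashing that pointer hid the imbalance. A stand-in sketch of the reference discipline (hypothetical types):

struct sk_sketch { int refcnt; };

/* A lookup hands back a new reference unless the socket was already
 * attached to the packet (and thus owned elsewhere). */
static struct sk_sketch *lookup(struct sk_sketch *skb_sk,
				struct sk_sketch *table_hit)
{
	if (skb_sk)
		return skb_sk;      /* no new reference taken */
	table_hit->refcnt++;
	return table_hit;
}

static void eval(struct sk_sketch *skb_sk, struct sk_sketch *table_hit)
{
	struct sk_sketch *sk = lookup(skb_sk, table_hit);

	/* ... read-only matching on sk's fields ... */

	if (sk != skb_sk)
		sk->refcnt--;       /* sock_gen_put() analogue: drop the
				     * reference we took, never stash the
				     * pointer in the packet */
}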
ret : len; } diff --git a/net/rds/bind.c b/net/rds/bind.c index 6dbb763bc1fd..20c156a73e73 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -239,34 +239,30 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; } - sock_set_flag(sk, SOCK_RCU_FREE); - ret = rds_add_bound(rs, binding_addr, &port, scope_id); - if (ret) - goto out; - - if (rs->rs_transport) { /* previously bound */ + /* The transport can be set using SO_RDS_TRANSPORT option before the + * socket is bound. + */ + if (rs->rs_transport) { trans = rs->rs_transport; if (trans->laddr_check(sock_net(sock->sk), binding_addr, scope_id) != 0) { ret = -ENOPROTOOPT; - rds_remove_bound(rs); - } else { - ret = 0; + goto out; } - goto out; - } - trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr, - scope_id); - if (!trans) { - ret = -EADDRNOTAVAIL; - rds_remove_bound(rs); - pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n", - __func__, binding_addr); - goto out; + } else { + trans = rds_trans_get_preferred(sock_net(sock->sk), + binding_addr, scope_id); + if (!trans) { + ret = -EADDRNOTAVAIL; + pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n", + __func__, binding_addr); + goto out; + } + rs->rs_transport = trans; } - rs->rs_transport = trans; - ret = 0; + sock_set_flag(sk, SOCK_RCU_FREE); + ret = rds_add_bound(rs, binding_addr, &port, scope_id); out: release_sock(sk); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index d122c53c8697..157be1ff8697 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -1262,8 +1262,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (nskb != skb) { rxrpc_eaten_skb(skb, rxrpc_skb_received); - rxrpc_new_skb(skb, rxrpc_skb_unshared); skb = nskb; + rxrpc_new_skb(skb, rxrpc_skb_unshared); sp = rxrpc_skb(skb); } } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 04faee7ccbce..1047825d9f48 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1920,6 +1920,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, cl = cops->find(q, portid); if (!cl) return; + if (!cops->tcf_block) + return; block = cops->tcf_block(q, cl, NULL); if (!block) return; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 137db1cbde85..ac28f6a5d70e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops); * - updates to tree and tree walking are only done under the rtnl mutex. 
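The qrtr_tun_write_iter() fix completed just above plugs two leaks of the same buffer: kbuf was never freed when copy_from_iter_full() failed, nor after qrtr_endpoint_post() had consumed its contents. A self-contained sketch of the leak-free shape (the post callback stands in for qrtr_endpoint_post(), which consumes the buffer's contents, not the buffer itself):

#include <stdlib.h>
#include <string.h>

static long tun_write_sketch(const void *src, size_t len,
			     long (*post)(const void *, size_t))
{
	long ret;
	void *kbuf = malloc(len);

	if (!kbuf)
		return -1;              /* -ENOMEM in the kernel */
	memcpy(kbuf, src, len);         /* copy_from_iter_full() stand-in;
					 * on failure, free before return */

	ret = post(kbuf, len);
	free(kbuf);                     /* freed on every path */
	return ret < 0 ? ret : (long)len;
}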
*/ +#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL) + static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) { const struct netdev_queue *txq = q->dev_queue; @@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) q->q.qlen--; } } else { - skb = NULL; + skb = SKB_XOFF_MAGIC; } } @@ -253,8 +255,11 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, return skb; skb = qdisc_dequeue_skb_bad_txq(q); - if (unlikely(skb)) + if (unlikely(skb)) { + if (skb == SKB_XOFF_MAGIC) + return NULL; goto bulk; + } skb = q->dequeue(q); if (skb) { bulk: diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index cee6971c1c82..23cd1c873a2c 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt, new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]); non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight; - if (non_hh_quantum > INT_MAX) + if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX) return -EINVAL; sch_tree_lock(sch); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b48ffe845c31..08d14d86ecfb 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1339,7 +1339,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net) return status; } -static void __net_init sctp_ctrlsock_exit(struct net *net) +static void __net_exit sctp_ctrlsock_exit(struct net *net) { /* Free the control endpoint. */ inet_ctl_sock_destroy(net->sctp.ctl_sock); diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 1cf5bb5b73c4..e52b2128e43b 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands, if (net->sctp.pf_enable && (transport->state == SCTP_ACTIVE) && (transport->error_count < transport->pathmaxrxt) && - (transport->error_count > asoc->pf_retrans)) { + (transport->error_count > transport->pf_retrans)) { sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_PF, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3e50a9712fb1..939b8d2595bc 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -309,7 +309,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) return retval; } -static long sctp_get_port_local(struct sock *, union sctp_addr *); +static int sctp_get_port_local(struct sock *, union sctp_addr *); /* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, @@ -399,9 +399,8 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) * detection. */ addr->v4.sin_port = htons(snum); - if ((ret = sctp_get_port_local(sk, addr))) { + if (sctp_get_port_local(sk, addr)) return -EADDRINUSE; - } /* Refresh ephemeral port. */ if (!bp->port) @@ -413,11 +412,13 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len, SCTP_ADDR_SRC, GFP_ATOMIC); - /* Copy back into socket for getsockname() use. */ - if (!ret) { - inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); - sp->pf->to_sk_saddr(addr, sk); + if (ret) { + sctp_put_port(sk); + return ret; } + /* Copy back into socket for getsockname() use. 
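SKB_XOFF_MAGIC above is a sentinel-pointer trick: __skb_dequeue_bad_txq() needs three outcomes (got an skb, nothing queued, queued but the txq is frozen), and returning NULL for the frozen case let dequeue_skb() fall through to the regular q->dequeue() and reorder packets. Address 1 can never be a real skb, so it safely encodes the third state; a compact illustration with invented types:

#include <stddef.h>

struct pkt_sketch { int id; };

/* Address 1 is never a valid object, so it can serve as a marker. */
#define PKT_XOFF ((struct pkt_sketch *)1UL)

static struct pkt_sketch *dequeue_stalled(struct pkt_sketch *head,
					  int txq_frozen)
{
	if (!head)
		return NULL;     /* nothing parked here */
	if (txq_frozen)
		return PKT_XOFF; /* parked packets exist, but must wait */
	return head;
}

static struct pkt_sketch *dequeue(struct pkt_sketch *stalled,
				  int txq_frozen,
				  struct pkt_sketch *(*regular)(void))
{
	struct pkt_sketch *p = dequeue_stalled(stalled, txq_frozen);

	if (p == PKT_XOFF)
		return NULL;     /* do not touch the regular queue */
	if (p)
		return p;
	return regular();        /* only safe once nothing is parked */
}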
*/ + inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); + sp->pf->to_sk_saddr(addr, sk); return ret; } @@ -7192,7 +7193,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans; - return 0; + goto out; } asoc = sctp_id2assoc(sk, val.spt_assoc_id); @@ -7210,6 +7211,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, val.spt_pathmaxrxt = sp->pathmaxrxt; } +out: if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; @@ -8145,7 +8147,7 @@ static void sctp_unhash(struct sock *sk) static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); -static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) +static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { struct sctp_sock *sp = sctp_sk(sk); bool reuse = (sk->sk_reuse || sp->reuse); @@ -8255,7 +8257,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, sp2, sp)) { - ret = (long)sk2; + ret = 1; goto fail_unlock; } } @@ -8327,7 +8329,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum) addr.v4.sin_port = htons(snum); /* Note: sk->sk_num gets filled in if ephemeral port request. */ - return !!sctp_get_port_local(sk, &addr); + return sctp_get_port_local(sk, &addr); } /* diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 61219f0b9677..836e629e8f4a 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr) publ->key); } - kfree_rcu(p, rcu); + if (p) + kfree_rcu(p, rcu); } /** diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 74868f9d81fb..2ab4859df55a 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -145,8 +145,6 @@ static int xfrmi_create(struct net_device *dev) if (err < 0) goto out; - strcpy(xi->p.name, dev->name); - dev_hold(dev); xfrmi_link(xfrmn, xi); @@ -177,7 +175,6 @@ static void xfrmi_dev_uninit(struct net_device *dev) struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id); xfrmi_unlink(xfrmn, xi); - dev_put(xi->phydev); dev_put(dev); } @@ -294,7 +291,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) if (tdev == dev) { stats->collisions++; net_warn_ratelimited("%s: Local routing loop detected!\n", - xi->p.name); + dev->name); goto tx_err_dst_release; } @@ -364,7 +361,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_err; } - fl.flowi_oif = xi->phydev->ifindex; + fl.flowi_oif = xi->p.link; ret = xfrmi_xmit2(skb, dev, &fl); if (ret < 0) @@ -505,7 +502,7 @@ static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p) static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p) { - struct net *net = dev_net(xi->dev); + struct net *net = xi->net; struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); int err; @@ -550,7 +547,7 @@ static int xfrmi_get_iflink(const struct net_device *dev) { struct xfrm_if *xi = netdev_priv(dev); - return xi->phydev->ifindex; + return xi->p.link; } @@ -576,12 +573,14 @@ static void xfrmi_dev_setup(struct net_device *dev) dev->needs_free_netdev = true; dev->priv_destructor = xfrmi_dev_free; netif_keep_dst(dev); + + eth_broadcast_addr(dev->broadcast); } static int xfrmi_dev_init(struct net_device *dev) { struct xfrm_if *xi = netdev_priv(dev); - 
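The sctp_get_port_local() conversion in the hunks above retires a pointer-as-status convention: the function used to return the conflicting socket cast to long (with callers collapsing it via !!), and now returns a plain int that sctp_get_port() passes through directly. A sketch of the status-only shape:

struct ep_sketch { unsigned short port; };

/* Report whether a conflict exists, never the conflicting object:
 * callers only test for truth, and (long)sk2 was a kernel pointer
 * masquerading as a status code. */
static int port_in_use(const struct ep_sketch *table, int n,
		       unsigned short port)
{
	int i;

	for (i = 0; i < n; i++)
		if (table[i].port == port)
			return 1;
	return 0;
}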
struct net_device *phydev = xi->phydev; + struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link); int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); @@ -596,13 +595,19 @@ static int xfrmi_dev_init(struct net_device *dev) dev->features |= NETIF_F_LLTX; - dev->needed_headroom = phydev->needed_headroom; - dev->needed_tailroom = phydev->needed_tailroom; + if (phydev) { + dev->needed_headroom = phydev->needed_headroom; + dev->needed_tailroom = phydev->needed_tailroom; - if (is_zero_ether_addr(dev->dev_addr)) - eth_hw_addr_inherit(dev, phydev); - if (is_zero_ether_addr(dev->broadcast)) - memcpy(dev->broadcast, phydev->broadcast, dev->addr_len); + if (is_zero_ether_addr(dev->dev_addr)) + eth_hw_addr_inherit(dev, phydev); + if (is_zero_ether_addr(dev->broadcast)) + memcpy(dev->broadcast, phydev->broadcast, + dev->addr_len); + } else { + eth_hw_addr_random(dev); + eth_broadcast_addr(dev->broadcast); + } return 0; } @@ -638,12 +643,6 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, int err; xfrmi_netlink_parms(data, &p); - - if (!tb[IFLA_IFNAME]) - return -EINVAL; - - nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ); - xi = xfrmi_locate(net, &p); if (xi) return -EEXIST; @@ -652,13 +651,8 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, xi->p = p; xi->net = net; xi->dev = dev; - xi->phydev = dev_get_by_index(net, p.link); - if (!xi->phydev) - return -ENODEV; err = xfrmi_create(dev); - if (err < 0) - dev_put(xi->phydev); return err; } @@ -672,11 +666,11 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct xfrm_if *xi = netdev_priv(dev); - struct net *net = dev_net(dev); + struct net *net = xi->net; + struct xfrm_if_parms p; - xfrmi_netlink_parms(data, &xi->p); - - xi = xfrmi_locate(net, &xi->p); + xfrmi_netlink_parms(data, &p); + xi = xfrmi_locate(net, &p); if (!xi) { xi = netdev_priv(dev); } else { @@ -684,7 +678,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], return -EEXIST; } - return xfrmi_update(xi, &xi->p); + return xfrmi_update(xi, &p); } static size_t xfrmi_get_size(const struct net_device *dev) @@ -715,7 +709,7 @@ static struct net *xfrmi_get_link_net(const struct net_device *dev) { struct xfrm_if *xi = netdev_priv(dev); - return dev_net(xi->phydev); + return xi->net; } static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = { diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ec94f5795ea4..21e939235b39 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -912,6 +912,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, } else if (delta > 0) { p = &parent->rb_right; } else { + bool same_prefixlen = node->prefixlen == n->prefixlen; struct xfrm_policy *tmp; hlist_for_each_entry(tmp, &n->hhead, bydst) { @@ -919,9 +920,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, hlist_del_rcu(&tmp->bydst); } + node->prefixlen = prefixlen; + xfrm_policy_inexact_list_reinsert(net, node, family); - if (node->prefixlen == n->prefixlen) { + if (same_prefixlen) { kfree_rcu(n, rcu); return; } @@ -929,7 +932,6 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, rb_erase(*p, new); kfree_rcu(n, rcu); n = node; - n->prefixlen = prefixlen; goto restart; } } diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index e73ec040e250..ecba39c93fd9 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -66,6 
+66,9 @@ static void request_key_auth_describe(const struct key *key, { struct request_key_auth *rka = dereference_key_rcu(key); + if (!rka) + return; + seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_positive(key)) @@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key, size_t datalen; long ret; + if (!rka) + return -EKEYREVOKED; + datalen = rka->callout_len; ret = datalen; diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 92390d457567..18e6546b4467 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) while (id >= 0) { const struct hda_fixup *fix = codec->fixup_list + id; + if (++depth > 10) + break; if (fix->chained_before) apply_fixup(codec, fix->chain_id, action, depth + 1); @@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) } if (!fix->chained || fix->chained_before) break; - if (++depth > 10) - break; id = fix->chain_id; } } diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 5bf24fb819d2..10d502328b76 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec) if (spec->init_hook) spec->init_hook(codec); - snd_hda_apply_verbs(codec); + if (!spec->skip_verbs) + snd_hda_apply_verbs(codec); init_multi_out(codec); init_extra_out(codec); diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 5f199dcb0d18..fb9f1a90238b 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -243,6 +243,7 @@ struct hda_gen_spec { unsigned int indep_hp_enabled:1; /* independent HP enabled */ unsigned int have_aamix_ctl:1; unsigned int hp_mic_jack_modes:1; + unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */ /* additional mute flags (only effective with auto_mute_via_amp=1) */ u64 mute_bits; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e333b3e30e31..c1ddfd2fac52 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec) if (spec->init_hook) spec->init_hook(codec); + spec->gen.skip_verbs = 1; /* applied in below */ snd_hda_gen_init(codec); alc_fix_pll(codec); alc_auto_init_amp(codec, spec->init_amp); + snd_hda_apply_verbs(codec); /* apply verbs here after own init */ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT); @@ -5797,6 +5799,7 @@ enum { ALC286_FIXUP_ACER_AIO_HEADSET_MIC, ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, ALC299_FIXUP_PREDATOR_SPK, + ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC, }; static const struct hda_fixup alc269_fixups[] = { @@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = { { } } }, + [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x411111f0 }, /* disable confusing internal speaker */ + { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8497, 
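apply_fixup() above hoists the depth check to the top of the loop so that it bounds both the iterative chain_id walk and the chained_before recursion; previously a malformed table could recurse before the counter was ever tested. A condensed sketch of the guarded walk (fields reduced to the two that matter here):

struct fixup_sketch {
	int chain_id;        /* next table entry; -1 terminates */
	int chained_before;  /* apply the chained entry first? */
};

static void apply_sketch(const struct fixup_sketch *tbl, int id, int depth)
{
	while (id >= 0) {
		const struct fixup_sketch *fix = &tbl[id];

		/* Checked at the top: bounds the loop and the recursion
		 * alike, so a cyclic chain cannot run away. */
		if (++depth > 10)
			break;
		if (fix->chained_before)
			apply_sketch(tbl, fix->chain_id, depth + 1);

		/* ... apply this fixup ... */

		if (fix->chained_before)
			break;
		id = fix->chain_id;
	}
}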
"HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), @@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), @@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec) static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269), HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269), HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269), HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269), HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c index 8219a30853d2..0fc1b6d4b0f9 100644 --- a/tools/testing/selftests/cgroup/test_freezer.c +++ b/tools/testing/selftests/cgroup/test_freezer.c @@ -447,6 +447,59 @@ static int test_cgfreezer_forkbomb(const char *root) return ret; } +/* + * The test creates a cgroups and freezes it. Then it creates a child cgroup + * and populates it with a task. After that it checks that the child cgroup + * is frozen and the parent cgroup remains frozen too. + */ +static int test_cgfreezer_mkdir(const char *root) +{ + int ret = KSFT_FAIL; + char *parent, *child = NULL; + int pid; + + parent = cg_name(root, "cg_test_mkdir_A"); + if (!parent) + goto cleanup; + + child = cg_name(parent, "cg_test_mkdir_B"); + if (!child) + goto cleanup; + + if (cg_create(parent)) + goto cleanup; + + if (cg_freeze_wait(parent, true)) + goto cleanup; + + if (cg_create(child)) + goto cleanup; + + pid = cg_run_nowait(child, child_fn, NULL); + if (pid < 0) + goto cleanup; + + if (cg_wait_for_proc_count(child, 1)) + goto cleanup; + + if (cg_check_frozen(child, true)) + goto cleanup; + + if (cg_check_frozen(parent, true)) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (child) + cg_destroy(child); + free(child); + if (parent) + cg_destroy(parent); + free(parent); + return ret; +} + /* * The test creates two nested cgroups, freezes the parent * and removes the child. 
Then it checks that the parent cgroup @@ -815,6 +868,7 @@ struct cgfreezer_test { T(test_cgfreezer_simple), T(test_cgfreezer_tree), T(test_cgfreezer_forkbomb), + T(test_cgfreezer_mkdir), T(test_cgfreezer_rmdir), T(test_cgfreezer_migrate), T(test_cgfreezer_ptrace), diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index c5c93d5fb3ad..f9ebeac1e6f2 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -212,6 +212,8 @@ check_output() printf " ${out}\n" printf " Expected:\n" printf " ${expected}\n\n" + else + echo " WARNING: Unexpected route entry" fi fi @@ -274,7 +276,7 @@ ipv6_fcnal() run_cmd "$IP nexthop get id 52" log_test $? 0 "Get nexthop by id" - check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1" + check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1 scope link" run_cmd "$IP nexthop del id 52" log_test $? 0 "Delete nexthop by id" @@ -479,12 +481,12 @@ ipv6_fcnal_runtime() run_cmd "$IP -6 nexthop add id 85 dev veth1" run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 85" log_test $? 0 "IPv6 route with device only nexthop" - check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1" + check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1 metric 1024 pref medium" run_cmd "$IP nexthop add id 123 group 81/85" run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 123" log_test $? 0 "IPv6 multipath route with nexthop mix - dev only + gw" - check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 nexthop via 2001:db8:91::2 dev veth1 nexthop dev veth1" + check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 123 metric 1024 nexthop via 2001:db8:91::2 dev veth1 weight 1 nexthop dev veth1 weight 1 pref medium" # # IPv6 route with v4 nexthop - not allowed @@ -538,7 +540,7 @@ ipv4_fcnal() run_cmd "$IP nexthop get id 12" log_test $? 0 "Get nexthop by id" - check_nexthop "id 12" "id 12 via 172.16.1.2 src 172.16.1.1 dev veth1 scope link" + check_nexthop "id 12" "id 12 via 172.16.1.2 dev veth1 scope link" run_cmd "$IP nexthop del id 12" log_test $? 0 "Delete nexthop by id" @@ -685,7 +687,7 @@ ipv4_withv6_fcnal() set +e run_cmd "$IP ro add 172.16.101.1/32 nhid 11" log_test $? 0 "IPv6 nexthop with IPv4 route" - check_route "172.16.101.1" "172.16.101.1 nhid 11 via ${lladdr} dev veth1" + check_route "172.16.101.1" "172.16.101.1 nhid 11 via inet6 ${lladdr} dev veth1" set -e run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1" @@ -694,11 +696,11 @@ ipv4_withv6_fcnal() run_cmd "$IP ro replace 172.16.101.1/32 nhid 101" log_test $? 0 "IPv6 nexthop with IPv4 route" - check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1" + check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1" run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1" log_test $? 0 "IPv4 route with IPv6 gateway" - check_route "172.16.101.1" "172.16.101.1 via ${lladdr} dev veth1" + check_route "172.16.101.1" "172.16.101.1 via inet6 ${lladdr} dev veth1" run_cmd "$IP ro replace 172.16.101.1/32 via inet6 2001:db8:50::1 dev veth1" log_test $? 2 "IPv4 route with invalid IPv6 gateway" @@ -785,10 +787,10 @@ ipv4_fcnal_runtime() log_test $? 
0 "IPv4 route with device only nexthop" check_route "172.16.101.1" "172.16.101.1 nhid 85 dev veth1" - run_cmd "$IP nexthop add id 122 group 21/85" - run_cmd "$IP ro replace 172.16.101.1/32 nhid 122" + run_cmd "$IP nexthop add id 123 group 21/85" + run_cmd "$IP ro replace 172.16.101.1/32 nhid 123" log_test $? 0 "IPv4 multipath route with nexthop mix - dev only + gw" - check_route "172.16.101.1" "172.16.101.1 nhid 85 nexthop via 172.16.1.2 dev veth1 nexthop dev veth1" + check_route "172.16.101.1" "172.16.101.1 nhid 123 nexthop via 172.16.1.2 dev veth1 weight 1 nexthop dev veth1 weight 1" # # IPv4 with IPv6 @@ -820,7 +822,7 @@ ipv4_fcnal_runtime() run_cmd "$IP ro replace 172.16.101.1/32 nhid 101" log_test $? 0 "IPv4 route with mixed v4-v6 multipath route" - check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1" + check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1" run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" log_test $? 0 "IPv6 nexthop with IPv4 route" diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh index 5445943bf07f..7a1bf94c5bd3 100755 --- a/tools/testing/selftests/net/xfrm_policy.sh +++ b/tools/testing/selftests/net/xfrm_policy.sh @@ -106,6 +106,13 @@ do_overlap() # # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23. ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block + + # similar to above: add policies (with partially random address), with shrinking prefixes. + for p in 29 28 27;do + for k in $(seq 1 32); do + ip -net $ns xfrm policy add src 10.253.1.$((RANDOM%255))/$p dst 10.254.1.$((RANDOM%255))/$p dir fwd priority $((200+k)) action block 2>/dev/null + done + done } do_esp_policy_get_check() {