Merge branch 'ib-pdx86-properties'

Merge branch 'ib-pdx86-properties' of
git://git.infradead.org/linux-platform-drivers-x86.git
to avoid conflicts in PDx86.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Andy Shevchenko 2020-04-20 18:34:20 +03:00
commit f7ea285b62
336 changed files with 3376 additions and 2210 deletions

View File

@ -390,9 +390,17 @@ When ``kptr_restrict`` is set to 2, kernel pointers printed using
modprobe
========
This gives the full path of the modprobe command which the kernel will
use to load modules. This can be used to debug module loading
requests::
The full path to the usermode helper for autoloading kernel modules,
by default "/sbin/modprobe". This binary is executed when the kernel
requests a module. For example, if userspace passes an unknown
filesystem type to mount(), then the kernel will automatically request
the corresponding filesystem module by executing this usermode helper.
This usermode helper should insert the needed module into the kernel.
This sysctl only affects module autoloading. It has no effect on the
ability to explicitly insert modules.
This sysctl can be used to debug module loading requests::
echo '#! /bin/sh' > /tmp/modprobe
echo 'echo "$@" >> /tmp/modprobe.log' >> /tmp/modprobe
@ -400,10 +408,15 @@ requests::
chmod a+x /tmp/modprobe
echo /tmp/modprobe > /proc/sys/kernel/modprobe
This only applies when the *kernel* is requesting that the module be
loaded; it won't have any effect if the module is being loaded
explicitly using ``modprobe`` from userspace.
Alternatively, if this sysctl is set to the empty string, then module
autoloading is completely disabled. The kernel will not try to
execute a usermode helper at all, nor will it call the
kernel_module_request LSM hook.
If CONFIG_STATIC_USERMODEHELPER=y is set in the kernel configuration,
then the configured static usermode helper overrides this sysctl,
except that the empty string is still accepted to completely disable
module autoloading as described above.
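For reference, module autoloading is triggered from inside the kernel via
request_module(); a minimal, hedged sketch of such a caller (the wrapper
function and the exact format string below are illustrative, not copied
from an in-tree user)::

  #include <linux/kmod.h>

  /* Ask the kernel to autoload a filesystem module, e.g. "fs-ext4".
   * The helper configured in /proc/sys/kernel/modprobe is executed;
   * if that sysctl is the empty string, no helper is run at all.
   */
  static int example_load_fs_module(const char *fstype)
  {
          return request_module("fs-%s", fstype);
  }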
modules_disabled
================
@ -446,28 +459,6 @@ Notes:
successful IPC object allocation. If an IPC object allocation syscall
fails, it is undefined if the value remains unmodified or is reset to -1.
modprobe:
=========
The path to the usermode helper for autoloading kernel modules, by
default "/sbin/modprobe". This binary is executed when the kernel
requests a module. For example, if userspace passes an unknown
filesystem type to mount(), then the kernel will automatically request
the corresponding filesystem module by executing this usermode helper.
This usermode helper should insert the needed module into the kernel.
This sysctl only affects module autoloading. It has no effect on the
ability to explicitly insert modules.
If this sysctl is set to the empty string, then module autoloading is
completely disabled. The kernel will not try to execute a usermode
helper at all, nor will it call the kernel_module_request LSM hook.
If CONFIG_STATIC_USERMODEHELPER=y is set in the kernel configuration,
then the configured static usermode helper overrides this sysctl,
except that the empty string is still accepted to completely disable
module autoloading as described above.
nmi_watchdog
============

View File

@ -154,9 +154,9 @@ architectures. These are the recommended replacements:
Use ktime_get() or ktime_get_ts64() instead.
.. c:function:: struct timeval do_gettimeofday( void )
struct timespec getnstimeofday( void )
struct timespec64 getnstimeofday64( void )
.. c:function:: void do_gettimeofday( struct timeval * )
void getnstimeofday( struct timespec * )
void getnstimeofday64( struct timespec64 * )
void ktime_get_real_ts( struct timespec * )
ktime_get_real_ts64() is a direct replacement, but consider using
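A minimal sketch of the conversion, assuming a caller that previously used
getnstimeofday64() (the surrounding function is illustrative)::

  #include <linux/printk.h>
  #include <linux/timekeeping.h>

  static void example_stamp(void)
  {
          struct timespec64 ts;

          /* was: getnstimeofday64(&ts); */
          ktime_get_real_ts64(&ts);

          pr_info("now: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
  }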

View File

@ -42,6 +42,10 @@ properties:
description:
See section 2.3.9 of the DeviceTree Specification.
'#address-cells': true
'#size-cells': true
required:
- "#interconnect-cells"
- compatible
@ -59,6 +63,8 @@ examples:
compatible = "allwinner,sun5i-a13-mbus";
reg = <0x01c01000 0x1000>;
clocks = <&ccu CLK_MBUS>;
#address-cells = <1>;
#size-cells = <1>;
dma-ranges = <0x00000000 0x40000000 0x20000000>;
#interconnect-cells = <1>;
};

View File

@ -91,7 +91,7 @@ required:
examples:
- |
vco1: clock@00 {
vco1: clock {
compatible = "arm,impd1-vco1";
#clock-cells = <0>;
lock-offset = <0x08>;

View File

@ -1,5 +1,5 @@
Analog Device ADV7123 Video DAC
-------------------------------
Analog Devices ADV7123 Video DAC
--------------------------------
The ADV7123 is a digital-to-analog converter that outputs VGA signals from a
parallel video input.

View File

@ -1,5 +1,5 @@
Analog Device ADV7511(W)/13/33/35 HDMI Encoders
-----------------------------------------
Analog Devices ADV7511(W)/13/33/35 HDMI Encoders
------------------------------------------------
The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space

View File

@ -1,4 +1,4 @@
Analog Device AXI-DMAC DMA controller
Analog Devices AXI-DMAC DMA controller
Required properties:
- compatible: Must be "adi,axi-dmac-1.00.a".

View File

@ -2,7 +2,7 @@
# Copyright 2019 Analog Devices Inc.
%YAML 1.2
---
$id: http://devicetree.org/schemas/bindings/hwmon/adi,axi-fan-control.yaml#
$id: http://devicetree.org/schemas/hwmon/adi,axi-fan-control.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices AXI FAN Control Device Tree Bindings
@ -47,7 +47,7 @@ required:
examples:
- |
fpga_axi: fpga-axi@0 {
fpga_axi: fpga-axi {
#address-cells = <0x2>;
#size-cells = <0x1>;

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/adt7475.yaml#
$id: http://devicetree.org/schemas/hwmon/adt7475.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ADT7475 hwmon sensor

View File

@ -1,4 +1,4 @@
* Analog Device AD5755 IIO Multi-Channel DAC Linux Driver
* Analog Devices AD5755 IIO Multi-Channel DAC Linux Driver
Required properties:
- compatible: Has to contain one of the following:

View File

@ -2,7 +2,7 @@
# Copyright 2020 Analog Devices Inc.
%YAML 1.2
---
$id: http://devicetree.org/schemas/bindings/iio/dac/adi,ad5770r.yaml#
$id: http://devicetree.org/schemas/iio/dac/adi,ad5770r.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices AD5770R DAC device driver
@ -49,93 +49,86 @@ properties:
asserted during driver probe.
maxItems: 1
channel0:
channel@0:
description: Represents an external channel which is
connected to the DAC. Channel 0 can act as both a current
source and a sink.
type: object
properties:
num:
reg:
description: This represents the channel number.
items:
const: 0
const: 0
adi,range-microamp:
description: Output range of the channel.
oneOf:
- $ref: /schemas/types.yaml#/definitions/int32-array
- items:
- enum: [0 300000]
- enum: [-60000 0]
- enum: [-60000 300000]
- const: 0
- const: 300000
- items:
- const: -60000
- const: 0
- items:
- const: -60000
- const: 300000
channel1:
channel@1:
description: Represents an external channel which is
connected to the DAC.
type: object
properties:
num:
reg:
description: This represents the channel number.
items:
const: 1
const: 1
adi,range-microamp:
description: Output range of the channel.
oneOf:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- items:
- enum: [0 140000]
- enum: [0 250000]
items:
- const: 0
- enum: [ 140000, 250000 ]
channel2:
channel@2:
description: Represents an external channel which is
connected to the DAC.
type: object
properties:
num:
reg:
description: This represents the channel number.
items:
const: 2
const: 2
adi,range-microamp:
description: Output range of the channel.
oneOf:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- items:
- enum: [0 140000]
- enum: [0 250000]
items:
- const: 0
- enum: [ 55000, 150000 ]
patternProperties:
"^channel@([3-5])$":
type: object
description: Represents the external channels which are connected to the DAC.
properties:
num:
reg:
description: This represents the channel number.
items:
minimum: 3
maximum: 5
minimum: 3
maximum: 5
adi,range-microamp:
description: Output range of the channel.
oneOf:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- items:
- enum: [0 45000]
- enum: [0 100000]
items:
- const: 0
- enum: [ 45000, 100000 ]
required:
- reg
- diff-channels
- channel0
- channel1
- channel2
- channel3
- channel4
- channel5
- channel@0
- channel@1
- channel@2
- channel@3
- channel@4
- channel@5
examples:
- |
@ -144,40 +137,42 @@ examples:
#size-cells = <0>;
ad5770r@0 {
compatible = "ad5770r";
compatible = "adi,ad5770r";
reg = <0>;
spi-max-frequency = <1000000>;
vref-supply = <&vref>;
adi,external-resistor;
reset-gpios = <&gpio 22 0>;
#address-cells = <1>;
#size-cells = <0>;
channel@0 {
num = <0>;
adi,range-microamp = <(-60000) 300000>;
reg = <0>;
adi,range-microamp = <0 300000>;
};
channel@1 {
num = <1>;
reg = <1>;
adi,range-microamp = <0 140000>;
};
channel@2 {
num = <2>;
reg = <2>;
adi,range-microamp = <0 55000>;
};
channel@3 {
num = <3>;
reg = <3>;
adi,range-microamp = <0 45000>;
};
channel@4 {
num = <4>;
reg = <4>;
adi,range-microamp = <0 45000>;
};
channel@5 {
num = <5>;
reg = <5>;
adi,range-microamp = <0 45000>;
};
};

View File

@ -109,7 +109,7 @@ examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
i2c@00000000 {
i2c {
#address-cells = <1>;
#size-cells = <0>;
edt-ft5x06@38 {

View File

@ -56,9 +56,8 @@ properties:
cell with zero.
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- items:
minItems: 4
maxItems: 4
- minItems: 4
maxItems: 4
required:

View File

@ -97,30 +97,35 @@ examples:
#include <dt-bindings/clock/tegra186-clock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
memory-controller@2c00000 {
compatible = "nvidia,tegra186-mc";
reg = <0x0 0x02c00000 0x0 0xb0000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
bus {
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x0 0x02c00000 0x02c00000 0x0 0xb0000>;
memory-controller@2c00000 {
compatible = "nvidia,tegra186-mc";
reg = <0x0 0x02c00000 0x0 0xb0000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
/*
* Memory clients have access to all 40 bits that the memory
* controller can address.
*/
dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
#address-cells = <2>;
#size-cells = <2>;
external-memory-controller@2c60000 {
compatible = "nvidia,tegra186-emc";
reg = <0x0 0x02c60000 0x0 0x50000>;
interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_EMC>;
clock-names = "emc";
ranges = <0x0 0x02c00000 0x0 0x02c00000 0x0 0xb0000>;
nvidia,bpmp = <&bpmp>;
/*
* Memory clients have access to all 40 bits that the memory
* controller can address.
*/
dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
external-memory-controller@2c60000 {
compatible = "nvidia,tegra186-emc";
reg = <0x0 0x02c60000 0x0 0x50000>;
interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA186_CLK_EMC>;
clock-names = "emc";
nvidia,bpmp = <&bpmp>;
};
};
};

View File

@ -123,7 +123,9 @@ examples:
#include <dt-bindings/leds/common.h>
i2c {
pmic: pmic@4b {
#address-cells = <1>;
#size-cells = <0>;
pmic: pmic@4b {
compatible = "rohm,bd71837";
reg = <0x4b>;
interrupt-parent = <&gpio1>;

View File

@ -128,7 +128,9 @@ examples:
#include <dt-bindings/leds/common.h>
i2c {
pmic: pmic@4b {
#address-cells = <1>;
#size-cells = <0>;
pmic: pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
interrupt-parent = <&gpio1>;

View File

@ -274,7 +274,7 @@ examples:
- |
#include <dt-bindings/mfd/st,stpmic1.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
i2c@0 {
i2c {
#address-cells = <1>;
#size-cells = <0>;
pmic@33 {

View File

@ -43,6 +43,9 @@ properties:
second group of digits is the Phy Identifier 2 register,
this is the chip vendor OUI bits 19:24, followed by 10
bits of a vendor specific ID.
- items:
- pattern: "^ethernet-phy-id[a-f0-9]{4}\\.[a-f0-9]{4}$"
- const: ethernet-phy-ieee802.3-c22
- items:
- pattern: "^ethernet-phy-id[a-f0-9]{4}\\.[a-f0-9]{4}$"
- const: ethernet-phy-ieee802.3-c45

View File

@ -22,6 +22,8 @@ Optional properties:
- fsl,err006687-workaround-present: If present indicates that the system has
the hardware workaround for ERR006687 applied and does not need a software
workaround.
- gpr: phandle of the SoC general purpose register (GPR) node. Required for
  wake on LAN on some SoCs
- interrupt-names: names of the interrupts listed in interrupts property in
the same order. The defaults if not specified are
__Number of interrupts__ __Default__

View File

@ -48,6 +48,7 @@ examples:
switch@10 {
compatible = "qca,qca8337";
reg = <0x10>;
/* ... */
};
};

View File

@ -29,7 +29,7 @@ Required properties for compatible string qcom,wcn399x-bt:
Optional properties for compatible string qcom,wcn399x-bt:
- max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
- max-speed: see Documentation/devicetree/bindings/serial/serial.yaml
- firmware-name: specify the name of nvm firmware to load
- clocks: clock provided to the controller

View File

@ -146,7 +146,7 @@ patternProperties:
bindings specified in
Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
Torrent SERDES should follow the bindings specified in
Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
required:
- compatible

View File

@ -31,10 +31,17 @@ additionalProperties: false
examples:
- |
cros-ec@0 {
compatible = "google,cros-ec-spi";
cros_ec_pwm: ec-pwm {
compatible = "google,cros-ec-pwm";
#pwm-cells = <1>;
spi {
#address-cells = <1>;
#size-cells = <0>;
cros-ec@0 {
compatible = "google,cros-ec-spi";
reg = <0>;
cros_ec_pwm: ec-pwm {
compatible = "google,cros-ec-pwm";
#pwm-cells = <1>;
};
};
};

View File

@ -39,7 +39,7 @@ additionalProperties: false
examples:
- |
rng {
rng@7e104000 {
compatible = "brcm,bcm2835-rng";
reg = <0x7e104000 0x10>;
interrupts = <2 29>;

View File

@ -61,7 +61,7 @@ examples:
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
soc: soc@0 {
soc: soc {
#address-cells = <2>;
#size-cells = <2>;

View File

@ -56,7 +56,7 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/clock/jz4740-cgu.h>
usb_phy: usb-phy@0 {
usb_phy: usb-phy {
compatible = "usb-nop-xceiv";
#phy-cells = <0>;
};

View File

@ -52,8 +52,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
Phy documentation is provided in the following places:
Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt - USB3 QMP PHY
Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt - USB2 QUSB2 PHY
Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt - USB3 QMP PHY
Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml - USB2 QUSB2 PHY
Example device nodes:

View File

@ -16,7 +16,7 @@ A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
Phy documentation is provided in the following places:
Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml - USB2.0 PHY
Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
Example device nodes:

View File

@ -16,7 +16,7 @@ Supported chips:
* Renesas ISL68220
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl68220'
Addresses scanned: -
@ -26,7 +26,7 @@ Supported chips:
* Renesas ISL68221
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl68221'
Addresses scanned: -
@ -36,7 +36,7 @@ Supported chips:
* Renesas ISL68222
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl68222'
Addresses scanned: -
@ -46,7 +46,7 @@ Supported chips:
* Renesas ISL68223
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl68223'
Addresses scanned: -
@ -56,7 +56,7 @@ Supported chips:
* Renesas ISL68224
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl68224'
Addresses scanned: -
@ -66,7 +66,7 @@ Supported chips:
* Renesas ISL68225
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl68225'
Addresses scanned: -
@ -76,7 +76,7 @@ Supported chips:
* Renesas ISL68226
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl68226'
Addresses scanned: -
@ -86,7 +86,7 @@ Supported chips:
* Renesas ISL68227
Prefix: 'raa_dmpvr2_1rail'
Prefix: 'isl68227'
Addresses scanned: -
@ -96,7 +96,7 @@ Supported chips:
* Renesas ISL68229
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl68229'
Addresses scanned: -
@ -106,7 +106,7 @@ Supported chips:
* Renesas ISL68233
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl68233'
Addresses scanned: -
@ -116,7 +116,7 @@ Supported chips:
* Renesas ISL68239
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl68239'
Addresses scanned: -
@ -126,7 +126,7 @@ Supported chips:
* Renesas ISL69222
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69222'
Addresses scanned: -
@ -136,7 +136,7 @@ Supported chips:
* Renesas ISL69223
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl69223'
Addresses scanned: -
@ -146,7 +146,7 @@ Supported chips:
* Renesas ISL69224
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69224'
Addresses scanned: -
@ -156,7 +156,7 @@ Supported chips:
* Renesas ISL69225
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69225'
Addresses scanned: -
@ -166,7 +166,7 @@ Supported chips:
* Renesas ISL69227
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl69227'
Addresses scanned: -
@ -176,7 +176,7 @@ Supported chips:
* Renesas ISL69228
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl69228'
Addresses scanned: -
@ -186,7 +186,7 @@ Supported chips:
* Renesas ISL69234
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69234'
Addresses scanned: -
@ -196,7 +196,7 @@ Supported chips:
* Renesas ISL69236
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69236'
Addresses scanned: -
@ -206,7 +206,7 @@ Supported chips:
* Renesas ISL69239
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl69239'
Addresses scanned: -
@ -216,7 +216,7 @@ Supported chips:
* Renesas ISL69242
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69242'
Addresses scanned: -
@ -226,7 +226,7 @@ Supported chips:
* Renesas ISL69243
Prefix: 'raa_dmpvr2_1rail'
Prefix: 'isl69243'
Addresses scanned: -
@ -236,7 +236,7 @@ Supported chips:
* Renesas ISL69247
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69247'
Addresses scanned: -
@ -246,7 +246,7 @@ Supported chips:
* Renesas ISL69248
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69248'
Addresses scanned: -
@ -256,7 +256,7 @@ Supported chips:
* Renesas ISL69254
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69254'
Addresses scanned: -
@ -266,7 +266,7 @@ Supported chips:
* Renesas ISL69255
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69255'
Addresses scanned: -
@ -276,7 +276,7 @@ Supported chips:
* Renesas ISL69256
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69256'
Addresses scanned: -
@ -286,7 +286,7 @@ Supported chips:
* Renesas ISL69259
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69259'
Addresses scanned: -
@ -296,7 +296,7 @@ Supported chips:
* Renesas ISL69260
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69260'
Addresses scanned: -
@ -306,7 +306,7 @@ Supported chips:
* Renesas ISL69268
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69268'
Addresses scanned: -
@ -316,7 +316,7 @@ Supported chips:
* Renesas ISL69269
Prefix: 'raa_dmpvr2_3rail'
Prefix: 'isl69269'
Addresses scanned: -
@ -326,7 +326,7 @@ Supported chips:
* Renesas ISL69298
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'isl69298'
Addresses scanned: -
@ -336,7 +336,7 @@ Supported chips:
* Renesas RAA228000
Prefix: 'raa_dmpvr2_hv'
Prefix: 'raa228000'
Addresses scanned: -
@ -346,7 +346,7 @@ Supported chips:
* Renesas RAA228004
Prefix: 'raa_dmpvr2_hv'
Prefix: 'raa228004'
Addresses scanned: -
@ -356,7 +356,7 @@ Supported chips:
* Renesas RAA228006
Prefix: 'raa_dmpvr2_hv'
Prefix: 'raa228006'
Addresses scanned: -
@ -366,7 +366,7 @@ Supported chips:
* Renesas RAA228228
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'raa228228'
Addresses scanned: -
@ -376,7 +376,7 @@ Supported chips:
* Renesas RAA229001
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'raa229001'
Addresses scanned: -
@ -386,7 +386,7 @@ Supported chips:
* Renesas RAA229004
Prefix: 'raa_dmpvr2_2rail'
Prefix: 'raa229004'
Addresses scanned: -

View File

@ -257,6 +257,8 @@ drivers:
* :doc:`netdevsim`
* :doc:`mlxsw`
.. _Generic-Packet-Trap-Groups:
Generic Packet Trap Groups
==========================

View File

@ -22,6 +22,7 @@ Contents:
z8530book
msg_zerocopy
failover
net_dim
net_failover
phy
sfp-phylink

View File

@ -812,7 +812,7 @@ tcp_limit_output_bytes - INTEGER
tcp_challenge_ack_limit - INTEGER
Limits number of Challenge ACK sent per second, as recommended
in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
Default: 100
Default: 1000
tcp_rx_skb_cache - BOOLEAN
Controls a per TCP socket cache of one skb, that might help

View File

@ -1,28 +1,20 @@
======================================================
Net DIM - Generic Network Dynamic Interrupt Moderation
======================================================
Author:
Tal Gilboa <talgi@mellanox.com>
:Author: Tal Gilboa <talgi@mellanox.com>
.. contents:: :depth: 2
Contents
=========
- Assumptions
- Introduction
- The Net DIM Algorithm
- Registering a Network Device to DIM
- Example
Part 0: Assumptions
======================
Assumptions
===========
This document assumes the reader has basic knowledge in network drivers
and in general interrupt moderation.
Part I: Introduction
======================
Introduction
============
Dynamic Interrupt Moderation (DIM) (in networking) refers to changing the
interrupt moderation configuration of a channel in order to optimize packet
@ -41,14 +33,15 @@ number of wanted packets per event. The Net DIM algorithm ascribes importance to
increase bandwidth over reducing interrupt rate.
Part II: The Net DIM Algorithm
===============================
Net DIM Algorithm
=================
Each iteration of the Net DIM algorithm follows these steps:
1. Calculates new data sample.
2. Compares it to previous sample.
3. Makes a decision - suggests interrupt moderation configuration fields.
4. Applies a schedule work function, which applies suggested configuration.
#. Calculates new data sample.
#. Compares it to previous sample.
#. Makes a decision - suggests interrupt moderation configuration fields.
#. Applies a schedule work function, which applies suggested configuration.
The first two steps are straightforward, both the new and the previous data are
supplied by the driver registered to Net DIM. The previous data is the new data
@ -89,19 +82,21 @@ manoeuvre as it may provide partial data or ignore the algorithm suggestion
under some conditions.
Part III: Registering a Network Device to DIM
==============================================
Registering a Network Device to DIM
===================================
Net DIM API exposes the main function net_dim(struct dim *dim,
struct dim_sample end_sample). This function is the entry point to the Net
Net DIM API exposes the main function net_dim().
This function is the entry point to the Net
DIM algorithm and has to be called every time the driver would like to check if
it should change interrupt moderation parameters. The driver should provide two
data structures: struct dim and struct dim_sample. Struct dim
data structures: :c:type:`struct dim <dim>` and
:c:type:`struct dim_sample <dim_sample>`. :c:type:`struct dim <dim>`
describes the state of DIM for a specific object (RX queue, TX queue,
other queues, etc.). This includes the current selected profile, previous data
samples, the callback function provided by the driver and more.
Struct dim_sample describes a data sample, which will be compared to the
data sample stored in struct dim in order to decide on the algorithm's next
:c:type:`struct dim_sample <dim_sample>` describes a data sample,
which will be compared to the data sample stored in :c:type:`struct dim <dim>`
in order to decide on the algorithm's next
step. The sample should include bytes, packets and interrupts, measured by
the driver.
@ -110,9 +105,10 @@ main net_dim() function. The recommended method is to call net_dim() on each
interrupt. Since Net DIM has a built-in moderation and it might decide to skip
iterations under certain conditions, there is no need to moderate the net_dim()
calls as well. As mentioned above, the driver needs to provide an object of type
struct dim to the net_dim() function call. It is advised for each entity
using Net DIM to hold a struct dim as part of its data structure and use it
as the main Net DIM API object. The struct dim_sample should hold the latest
:c:type:`struct dim <dim>` to the net_dim() function call. It is advised for
each entity using Net DIM to hold a :c:type:`struct dim <dim>` as part of its
data structure and use it as the main Net DIM API object.
The :c:type:`struct dim_sample <dim_sample>` should hold the latest
bytes, packets and interrupts count. No need to perform any calculations, just
include the raw data.
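A short, hedged sketch of filling the sample with the dim_update_sample()
helper from include/linux/dim.h (the counters read from ``my_entity`` are
illustrative, not a real driver API):

.. code-block:: c

    struct dim_sample dim_sample = {};

    /* Pack raw byte/packet/interrupt counters plus a timestamp. */
    dim_update_sample(my_entity->event_ctr,
                      my_entity->packets,
                      my_entity->bytes,
                      &dim_sample);

    net_dim(&my_entity->dim, dim_sample);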
@ -124,19 +120,19 @@ the data flow. After the work is done, Net DIM algorithm needs to be set to
the proper state in order to move to the next iteration.
Part IV: Example
=================
Example
=======
The following code demonstrates how to register a driver to Net DIM. The actual
usage is not complete but it should make the outline of the usage clear.
my_driver.c:
.. code-block:: c
#include <linux/dim.h>
#include <linux/dim.h>
/* Callback for net DIM to schedule on a decision to change moderation */
void my_driver_do_dim_work(struct work_struct *work)
{
/* Callback for net DIM to schedule on a decision to change moderation */
void my_driver_do_dim_work(struct work_struct *work)
{
/* Get struct dim from struct work_struct */
struct dim *dim = container_of(work, struct dim,
work);
@ -145,11 +141,11 @@ void my_driver_do_dim_work(struct work_struct *work)
/* Signal net DIM work is done and it should move to next iteration */
dim->state = DIM_START_MEASURE;
}
}
/* My driver's interrupt handler */
int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
{
/* My driver's interrupt handler */
int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
{
...
/* A struct to hold current measured data */
struct dim_sample dim_sample;
@ -162,13 +158,19 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
/* Call net DIM */
net_dim(&my_entity->dim, dim_sample);
...
}
}
/* My entity's initialization function (my_entity was already allocated) */
int my_driver_init_my_entity(struct my_driver_entity *my_entity, ...)
{
/* My entity's initialization function (my_entity was already allocated) */
int my_driver_init_my_entity(struct my_driver_entity *my_entity, ...)
{
...
/* Initiate struct work_struct with my driver's callback function */
INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);
...
}
}
Dynamic Interrupt Moderation (DIM) library API
==============================================
.. kernel-doc:: include/linux/dim.h
:internal:

View File

@ -1399,8 +1399,8 @@ must have read/write permission; CS must be __BOOT_CS and DS, ES, SS
must be __BOOT_DS; interrupt must be disabled; %rsi must hold the base
address of the struct boot_params.
EFI Handover Protocol
=====================
EFI Handover Protocol (deprecated)
==================================
This protocol allows boot loaders to defer initialisation to the EFI
boot stub. The boot loader is required to load the kernel/initrd(s)
@ -1408,6 +1408,12 @@ from the boot media and jump to the EFI handover protocol entry point
which is hdr->handover_offset bytes from the beginning of
startup_{32,64}.
The boot loader MUST respect the kernel's PE/COFF metadata when it comes
to section alignment, the memory footprint of the executable image beyond
the size of the file itself, and any other aspect of the PE/COFF header
that may affect correct operation of the image as a PE/COFF binary in the
execution context provided by the EFI firmware.
The function prototype for the handover entry point looks like this::
efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
@ -1419,9 +1425,18 @@ UEFI specification. 'bp' is the boot loader-allocated boot params.
The boot loader *must* fill out the following fields in bp::
- hdr.code32_start
- hdr.cmd_line_ptr
- hdr.ramdisk_image (if applicable)
- hdr.ramdisk_size (if applicable)
All other fields should be zero.
NOTE: The EFI Handover Protocol is deprecated in favour of the ordinary PE/COFF
entry point, combined with the LINUX_EFI_INITRD_MEDIA_GUID based initrd
loading protocol (refer to [0] for an example of the bootloader side of
this), which removes the need for any knowledge on the part of the EFI
bootloader regarding the internal representation of boot_params or any
requirements/limitations regarding the placement of the command line
and ramdisk in memory, or the placement of the kernel image itself.
[0] https://github.com/u-boot/u-boot/commit/ec80b4735a593961fe701cc3a5d717d4739b0fd0

View File

@ -1323,7 +1323,10 @@ ARM INTEGRATOR, VERSATILE AND REALVIEW SUPPORT
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/arm-boards
F: Documentation/devicetree/bindings/arm/arm,integrator.yaml
F: Documentation/devicetree/bindings/arm/arm,realview.yaml
F: Documentation/devicetree/bindings/arm/arm,versatile.yaml
F: Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
F: Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
F: Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
F: Documentation/devicetree/bindings/i2c/i2c-versatile.txt
@ -5552,7 +5555,7 @@ M: Chen-Yu Tsai <wens@csie.org>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
F: Documentation/devicetree/bindings/display/allwinner*
F: drivers/gpu/drm/sun4i/
DRM DRIVERS FOR AMLOGIC SOCS
@ -5934,6 +5937,7 @@ M: Tal Gilboa <talgi@mellanox.com>
S: Maintained
F: include/linux/dim.h
F: lib/dim/
F: Documentation/networking/net_dim.rst
DZ DECSTATION DZ11 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
@ -13853,7 +13857,8 @@ S: Maintained
F: drivers/scsi/qla1280.[ch]
QLOGIC QLA2XXX FC-SCSI DRIVER
M: hmadhani@marvell.com
M: Nilesh Javali <njavali@marvell.com>
M: GR-QLogic-Storage-Upstream@marvell.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/LICENSE.qla2xxx

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*

View File

@ -1450,7 +1450,8 @@ ENTRY(efi_enter_kernel)
@ running beyond the PoU, and so calling cache_off below from
@ inside the PE/COFF loader allocated region is unsafe unless
@ we explicitly clean it to the PoC.
adr r0, call_cache_fn @ region of code we will
ARM( adrl r0, call_cache_fn )
THUMB( adr r0, call_cache_fn ) @ region of code we will
adr r1, 0f @ run with MMU off
bl cache_clean_flush
bl cache_off

View File

@ -1039,13 +1039,13 @@ fec: ethernet@2188000 {
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
interrupt-names = "int0", "pps";
interrupts-extended =
<&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
<0 119 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET_REF>;
clock-names = "ipg", "ahb", "ptp";
gpr = <&gpr>;
status = "disabled";
};

View File

@ -77,7 +77,6 @@ prg2: prg@21cd000 {
};
&fec {
/delete-property/interrupts-extended;
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
<0 119 IRQ_TYPE_LEVEL_HIGH>;
};

View File

@ -929,7 +929,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[],
rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSR operation */
if (val < 32) {
if (val == 0) {
/* An immediate value of 0 encodes a shift amount of 32
* for LSR. To shift by 0, don't do anything.
*/
} else if (val < 32) {
emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
@ -955,7 +959,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[],
rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do ARSH operation */
if (val < 32) {
if (val == 0) {
/* An immediate value of 0 encodes a shift amount of 32
* for ASR. To shift by 0, don't do anything.
*/
} else if (val < 32) {
emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
@ -992,21 +1000,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
static bool is_ldst_imm(s16 off, const u8 size)
{
s16 off_max = 0;
switch (size) {
case BPF_B:
case BPF_W:
off_max = 0xfff;
break;
case BPF_H:
off_max = 0xff;
break;
case BPF_DW:
/* Need to make sure off+4 does not overflow. */
off_max = 0xfff - 4;
break;
}
return -off_max <= off && off <= off_max;
}
/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
s32 off, struct jit_ctx *ctx, const u8 sz){
s16 off, struct jit_ctx *ctx, const u8 sz){
const s8 *tmp = bpf2a32[TMP_REG_1];
s32 off_max;
s8 rd;
rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
if (sz == BPF_H)
off_max = 0xff;
else
off_max = 0xfff;
if (off < 0 || off > off_max) {
if (!is_ldst_imm(off, sz)) {
emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
rd = tmp[0];
@ -1035,18 +1057,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[],
/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
s32 off, struct jit_ctx *ctx, const u8 sz){
s16 off, struct jit_ctx *ctx, const u8 sz){
const s8 *tmp = bpf2a32[TMP_REG_1];
const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
s8 rm = src;
s32 off_max;
if (sz == BPF_H)
off_max = 0xff;
else
off_max = 0xfff;
if (off < 0 || off > off_max) {
if (!is_ldst_imm(off, sz)) {
emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];

View File

@ -36,7 +36,7 @@
#include <linux/mm.h>
struct start_info _xen_start_info;
static struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL(xen_start_info);

View File

@ -49,7 +49,9 @@
#ifndef CONFIG_BROKEN_GAS_INST
#ifdef __ASSEMBLY__
#define __emit_inst(x) .inst (x)
// The space separator is omitted so that __emit_inst(x) can be parsed as
// either an assembler directive or an assembler macro argument.
#define __emit_inst(x) .inst(x)
#else
#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
#endif

View File

@ -260,18 +260,7 @@ static int __aarch32_alloc_vdso_pages(void)
if (ret)
return ret;
ret = aarch32_alloc_kuser_vdso_page();
if (ret) {
unsigned long c_vvar =
(unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
unsigned long c_vdso =
(unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
free_page(c_vvar);
free_page(c_vdso);
}
return ret;
return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)

View File

@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += extable.h
generic-y += hardirq.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h

View File

@ -55,7 +55,7 @@ config RISCV
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MMIOWB
select ARCH_HAS_DEBUG_VIRTUAL
select HAVE_EBPF_JIT
select HAVE_EBPF_JIT if MMU
select EDAC_SUPPORT
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_SET_DIRECT_MAP

View File

@ -110,6 +110,16 @@ static bool is_32b_int(s64 val)
return -(1L << 31) <= val && val < (1L << 31);
}
static bool in_auipc_jalr_range(s64 val)
{
/*
* auipc+jalr can reach any signed PC-relative offset in the range
* [-2^31 - 2^11, 2^31 - 2^11).
*/
return (-(1L << 31) - (1L << 11)) <= val &&
val < ((1L << 31) - (1L << 11));
}
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
/* Note that the immediate from the add is sign-extended,
@ -380,20 +390,24 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
*rd = RV_REG_T2;
}
static void emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
struct rv_jit_context *ctx)
static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
struct rv_jit_context *ctx)
{
s64 upper, lower;
if (rvoff && is_21b_int(rvoff) && !force_jalr) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return;
return 0;
} else if (in_auipc_jalr_range(rvoff)) {
upper = (rvoff + (1 << 11)) >> 12;
lower = rvoff & 0xfff;
emit(rv_auipc(RV_REG_T1, upper), ctx);
emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
return 0;
}
upper = (rvoff + (1 << 11)) >> 12;
lower = rvoff & 0xfff;
emit(rv_auipc(RV_REG_T1, upper), ctx);
emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
return -ERANGE;
}
static bool is_signed_bpf_cond(u8 cond)
@ -407,18 +421,16 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
s64 off = 0;
u64 ip;
u8 rd;
int ret;
if (addr && ctx->insns) {
ip = (u64)(long)(ctx->insns + ctx->ninsns);
off = addr - ip;
if (!is_32b_int(off)) {
pr_err("bpf-jit: target call addr %pK is out of range\n",
(void *)addr);
return -ERANGE;
}
}
emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
if (ret)
return ret;
rd = bpf_to_rv_reg(BPF_REG_0, ctx);
emit(rv_addi(rd, RV_REG_A0, 0), ctx);
return 0;
@ -429,7 +441,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
{
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
BPF_CLASS(insn->code) == BPF_JMP;
int s, e, rvoff, i = insn - ctx->prog->insnsi;
int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
struct bpf_prog_aux *aux = ctx->prog->aux;
u8 rd = -1, rs = -1, code = insn->code;
s16 off = insn->off;
@ -699,7 +711,9 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* JUMP off */
case BPF_JMP | BPF_JA:
rvoff = rv_offset(i, off, ctx);
emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
if (ret)
return ret;
break;
/* IF (dst COND src) JUMP off */
@ -801,7 +815,6 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_JMP | BPF_CALL:
{
bool fixed;
int ret;
u64 addr;
mark_call(ctx);
@ -826,7 +839,9 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
break;
rvoff = epilogue_offset(ctx);
emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
if (ret)
return ret;
break;
/* dst = imm64 */

View File

@ -20,6 +20,7 @@
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
@ -419,11 +420,14 @@ void hyperv_cleanup(void)
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
void hyperv_report_panic(struct pt_regs *regs, long err)
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
static bool panic_reported;
u64 guest_id;
if (in_die && !panic_on_oops)
return;
/*
* We prefer to report panic on 'die' chain as we have proper
* registers to report, but if we miss it (e.g. on BUG()) we need

View File

@ -178,8 +178,10 @@ extern void efi_free_boot_services(void);
extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);
/* kexec external ABI */
struct efi_setup_data {
u64 fw_vendor;
u64 __unused;
u64 tables;
u64 smbios;
u64 reserved[8];

View File

@ -41,7 +41,7 @@ struct microcode_amd {
unsigned int mpb[0];
};
#define PATCH_MAX_SIZE PAGE_SIZE
#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
#ifdef CONFIG_MICROCODE_AMD
extern void __init load_ucode_amd_bsp(unsigned int family);

View File

@ -1119,35 +1119,53 @@ void switch_to_sld(unsigned long tifn)
sld_update_msr(!(tifn & _TIF_SLD));
}
#define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}
/*
* The following processors have the split lock detection feature. But
* since they don't have the IA32_CORE_CAPABILITIES MSR, the feature cannot
* be enumerated. Enable it by family and model matching on these
* processors.
* Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
* only be trusted if it is confirmed that a CPU model implements a
* specific feature at a particular bit position.
*
* The possible driver data field values:
*
* - 0: CPU models that are known to have the per-core split-lock detection
* feature even though they do not enumerate IA32_CORE_CAPABILITIES.
*
* - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
* bit 5 to enumerate the per-core split-lock detection feature.
*/
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X),
SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
{}
};
void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
{
u64 ia32_core_caps = 0;
const struct x86_cpu_id *m;
u64 ia32_core_caps;
if (c->x86_vendor != X86_VENDOR_INTEL)
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) {
/* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. */
m = x86_match_cpu(split_lock_cpu_ids);
if (!m)
return;
switch (m->driver_data) {
case 0:
break;
case 1:
if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
return;
rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
} else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
/* Enumerate split lock detection by family and model. */
if (x86_match_cpu(split_lock_cpu_ids))
ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT;
if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
return;
break;
default:
return;
}
if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
split_lock_setup();
split_lock_setup();
}

View File

@ -227,8 +227,8 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
ms_hyperv.features, ms_hyperv.hints);
pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
@ -263,6 +263,16 @@ static void __init ms_hyperv_init_platform(void)
cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
}
/*
* Hyper-V expects to get crash register data or kmsg when
* crash enlightenment is available and the system crashes. Set
* crash_kexec_post_notifiers to true to make sure the crash
* enlightenment interface is called before running the kdump
* kernel.
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
crash_kexec_post_notifiers = true;
#ifdef CONFIG_X86_LOCAL_APIC
if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {

View File

@ -578,6 +578,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
d->id = id;
cpumask_set_cpu(cpu, &d->cpu_mask);
rdt_domain_reconfigure_cdp(r);
if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
kfree(d);
return;

View File

@ -601,5 +601,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */

View File

@ -1859,6 +1859,19 @@ static int set_cache_qos_cfg(int level, bool enable)
return 0;
}
/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
if (!r->alloc_capable)
return;
if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
l2_qos_cfg_update(&r->alloc_enabled);
if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
l3_qos_cfg_update(&r->alloc_enabled);
}
/*
* Enable or disable the MBA software controller
* which helps user specify bandwidth in MBps.
@ -3072,7 +3085,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
* If the rdtgroup is a mon group and parent directory
* is a valid "mon_groups" directory, remove the mon group.
*/
if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
rdtgrp != &rdtgroup_default) {
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
ret = rdtgroup_ctrl_remove(kn, rdtgrp);

View File

@ -81,7 +81,7 @@
#define UMIP_INST_SLDT 3 /* 0F 00 /0 */
#define UMIP_INST_STR 4 /* 0F 00 /1 */
const char * const umip_insns[5] = {
static const char * const umip_insns[5] = {
[UMIP_INST_SGDT] = "SGDT",
[UMIP_INST_SIDT] = "SIDT",
[UMIP_INST_SMSW] = "SMSW",

View File

@ -202,7 +202,7 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
unsigned long pfn, text, pf;
unsigned long pfn, text, pf, rodata;
struct page *page;
unsigned npages;
pgd_t *pgd = efi_mm.pgd;
@ -256,7 +256,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT;
npages = (_etext - _text) >> PAGE_SHIFT;
text = __pa(_text);
pfn = text >> PAGE_SHIFT;
@ -266,6 +266,14 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 1;
}
npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
rodata = __pa(__start_rodata);
pfn = rodata >> PAGE_SHIFT;
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
pr_err("Failed to map kernel rodata 1:1\n");
return 1;
}
return 0;
}
@ -638,7 +646,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
if (!phys_name || !phys_data)
if (!phys_name || (data && !phys_data))
status = EFI_INVALID_PARAMETER;
else
status = efi_thunk(set_variable, phys_name, phys_vendor,
@ -669,7 +677,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
if (!phys_name || !phys_data)
if (!phys_name || (data && !phys_data))
status = EFI_INVALID_PARAMETER;
else
status = efi_thunk(set_variable, phys_name, phys_vendor,

View File

@ -1222,8 +1222,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
rq = list_first_entry(list, struct request, queuelist);
hctx = rq->mq_hctx;
if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
blk_mq_put_driver_tag(rq);
break;
}
if (!blk_mq_get_driver_tag(rq)) {
/*

View File

@ -313,7 +313,7 @@ static void scale_up(struct rq_wb *rwb)
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
rwb_wake_all(rwb);
rwb_trace_step(rwb, "scale up");
rwb_trace_step(rwb, tracepoint_string("scale up"));
}
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
@ -322,7 +322,7 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
return;
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
rwb_trace_step(rwb, "scale down");
rwb_trace_step(rwb, tracepoint_string("scale down"));
}
static void rwb_arm_timer(struct rq_wb *rwb)

View File

@ -410,6 +410,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */

View File

@ -3891,6 +3891,7 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
else
dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
* device_set_of_node_from_dev - reuse device-tree node of another device

View File

@ -726,6 +726,54 @@ void software_node_unregister_nodes(const struct software_node *nodes)
}
EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
/**
* software_node_register_node_group - Register a group of software nodes
* @node_group: NULL terminated array of software node pointers to be registered
*
* Register multiple software nodes at once.
*/
int software_node_register_node_group(const struct software_node **node_group)
{
unsigned int i;
int ret;
if (!node_group)
return 0;
for (i = 0; node_group[i]; i++) {
ret = software_node_register(node_group[i]);
if (ret) {
software_node_unregister_node_group(node_group);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(software_node_register_node_group);
/**
* software_node_unregister_node_group - Unregister a group of software nodes
* @node_group: NULL terminated array of software node pointers to be unregistered
*
* Unregister multiple software nodes at once.
*/
void software_node_unregister_node_group(const struct software_node **node_group)
{
struct swnode *swnode;
unsigned int i;
if (!node_group)
return;
for (i = 0; node_group[i]; i++) {
swnode = software_node_to_swnode(node_group[i]);
if (swnode)
fwnode_remove_software_node(&swnode->fwnode);
}
}
EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
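/*
 * Hedged usage sketch (node names are illustrative, not taken from an
 * in-tree driver): build a NULL-terminated array of software nodes and
 * register the whole group at once, then unregister it with the matching
 * helper when the nodes are no longer needed.
 *
 *	static const struct software_node example_parent = {
 *		.name = "example-parent",
 *	};
 *
 *	static const struct software_node example_child = {
 *		.name = "example-child",
 *		.parent = &example_parent,
 *	};
 *
 *	static const struct software_node *example_group[] = {
 *		&example_parent,
 *		&example_child,
 *		NULL
 *	};
 *
 *	ret = software_node_register_node_group(example_group);
 *	...
 *	software_node_unregister_node_group(example_group);
 */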
/**
* software_node_register - Register static software node
* @node: The software node to be registered

View File

@ -3754,11 +3754,7 @@ static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
enum rbd_notify_op notify_op)
{
struct page **reply_pages;
size_t reply_len;
__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
__rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
}
static void rbd_notify_acquired_lock(struct work_struct *work)
@ -4527,6 +4523,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
cancel_work_sync(&rbd_dev->unlock_work);
}
/*
* header_rwsem must not be held to avoid a deadlock with
* rbd_dev_refresh() when flushing notifies.
*/
static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
cancel_tasks_sync(rbd_dev);
@ -6894,9 +6894,10 @@ static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
if (rbd_dev->opts)
if (!rbd_is_ro(rbd_dev))
rbd_unregister_watch(rbd_dev);
rbd_dev_unprobe(rbd_dev);
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@ -6907,6 +6908,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
* device. If this image is the one being mapped (i.e., not a
* parent), initiate a watch on its header object before using that
* object to get detailed information about the rbd image.
*
* On success, returns with header_rwsem held for write if called
* with @depth == 0.
*/
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
@ -6936,11 +6940,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
}
if (!depth)
down_write(&rbd_dev->header_rwsem);
ret = rbd_dev_header_info(rbd_dev);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_watch;
goto err_out_probe;
}
/*
@ -6985,10 +6992,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
return 0;
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
if (!depth)
up_write(&rbd_dev->header_rwsem);
if (need_watch)
rbd_unregister_watch(rbd_dev);
rbd_dev_unprobe(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
@ -7050,12 +7058,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
goto err_out_rbd_dev;
}
down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0) {
up_write(&rbd_dev->header_rwsem);
if (rc < 0)
goto err_out_rbd_dev;
}
if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
rbd_warn(rbd_dev, "alloc_size adjusted to %u",

View File

@ -276,7 +276,7 @@ static void __init asm9260_acc_init(struct device_node *np)
/* TODO: Convert to DT parent scheme */
ref_clk = of_clk_get_parent_name(np, 0);
hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
ref_clk, NULL, NULL, 0, rate, 0,
CLK_FIXED_RATE_PARENT_ACCURACY);

View File

@ -97,7 +97,7 @@ static const struct clk_ops mmp_clk_pll_ops = {
.recalc_rate = mmp_clk_pll_recalc_rate,
};
struct clk *mmp_clk_register_pll(char *name,
static struct clk *mmp_clk_register_pll(char *name,
unsigned long default_rate,
void __iomem *enable_reg, u32 enable,
void __iomem *reg, u8 shift,
@ -137,3 +137,34 @@ struct clk *mmp_clk_register_pll(char *name,
return clk;
}
void mmp_register_pll_clks(struct mmp_clk_unit *unit,
struct mmp_param_pll_clk *clks,
void __iomem *base, int size)
{
struct clk *clk;
int i;
for (i = 0; i < size; i++) {
void __iomem *reg = NULL;
if (clks[i].offset)
reg = base + clks[i].offset;
clk = mmp_clk_register_pll(clks[i].name,
clks[i].default_rate,
base + clks[i].enable_offset,
clks[i].enable,
reg, clks[i].shift,
clks[i].input_rate,
base + clks[i].postdiv_offset,
clks[i].postdiv_shift);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
continue;
}
if (clks[i].id)
unit->clk_table[clks[i].id] = clk;
}
}

View File

@ -176,37 +176,6 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit,
}
}
void mmp_register_pll_clks(struct mmp_clk_unit *unit,
struct mmp_param_pll_clk *clks,
void __iomem *base, int size)
{
struct clk *clk;
int i;
for (i = 0; i < size; i++) {
void __iomem *reg = NULL;
if (clks[i].offset)
reg = base + clks[i].offset;
clk = mmp_clk_register_pll(clks[i].name,
clks[i].default_rate,
base + clks[i].enable_offset,
clks[i].enable,
reg, clks[i].shift,
clks[i].input_rate,
base + clks[i].postdiv_offset,
clks[i].postdiv_shift);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
continue;
}
if (clks[i].id)
unit->clk_table[clks[i].id] = clk;
}
}
void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
struct clk *clk)
{

View File

@ -238,13 +238,6 @@ void mmp_register_pll_clks(struct mmp_clk_unit *unit,
struct mmp_param_pll_clk *clks,
void __iomem *base, int size);
extern struct clk *mmp_clk_register_pll(char *name,
unsigned long default_rate,
void __iomem *enable_reg, u32 enable,
void __iomem *reg, u8 shift,
unsigned long input_rate,
void __iomem *postdiv_reg, u8 postdiv_shift);
#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \
{ \
.width_div = (w_d), \

View File

@ -1641,8 +1641,9 @@ static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb, "i2c4-eb", "ext-26m", 0x0,
0x1000, BIT(12), 0, 0);
static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb, "uart0-eb", "ext-26m", 0x0,
0x1000, BIT(13), 0, 0);
/* uart1_eb is for console, don't gate even if unused */
static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb, "uart1-eb", "ext-26m", 0x0,
0x1000, BIT(14), 0, 0);
0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb, "uart2-eb", "ext-26m", 0x0,
0x1000, BIT(15), 0, 0);
static SPRD_SC_GATE_CLK_FW_NAME(uart3_eb, "uart3-eb", "ext-26m", 0x0,

View File

@ -101,7 +101,7 @@ void cper_print_bits(const char *pfx, unsigned int bits,
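	/*
	 * Use scnprintf() for the append: it returns the number of bytes
	 * actually stored in buf, so len cannot grow past sizeof(buf) and
	 * the sizeof(buf)-len argument cannot underflow on truncation.
	 */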
if (!len)
len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
else
len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
len += scnprintf(buf+len, sizeof(buf)-len, ", %s", str);
}
if (len)
printk("%s\n", buf);

View File

@ -25,7 +25,7 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
#ifdef CONFIG_ARM
#if defined(CONFIG_ARM) || defined(CONFIG_X86)
#define __efistub_global __section(.data)
#else
#define __efistub_global

View File

@ -29,30 +29,31 @@
*/
#define EFI_READ_CHUNK_SIZE SZ_1M
struct finfo {
efi_file_info_t info;
efi_char16_t filename[MAX_FILENAME_SIZE];
};
static efi_status_t efi_open_file(efi_file_protocol_t *volume,
efi_char16_t *filename_16,
struct finfo *fi,
efi_file_protocol_t **handle,
unsigned long *file_size)
{
struct {
efi_file_info_t info;
efi_char16_t filename[MAX_FILENAME_SIZE];
} finfo;
efi_guid_t info_guid = EFI_FILE_INFO_ID;
efi_file_protocol_t *fh;
unsigned long info_sz;
efi_status_t status;
status = volume->open(volume, &fh, filename_16, EFI_FILE_MODE_READ, 0);
status = volume->open(volume, &fh, fi->filename, EFI_FILE_MODE_READ, 0);
if (status != EFI_SUCCESS) {
pr_efi_err("Failed to open file: ");
efi_char16_printk(filename_16);
efi_char16_printk(fi->filename);
efi_printk("\n");
return status;
}
info_sz = sizeof(finfo);
status = fh->get_info(fh, &info_guid, &info_sz, &finfo);
info_sz = sizeof(struct finfo);
status = fh->get_info(fh, &info_guid, &info_sz, fi);
if (status != EFI_SUCCESS) {
pr_efi_err("Failed to get file info\n");
fh->close(fh);
@ -60,7 +61,7 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume,
}
*handle = fh;
*file_size = finfo.info.file_size;
*file_size = fi->info.file_size;
return EFI_SUCCESS;
}
@ -146,13 +147,13 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
alloc_addr = alloc_size = 0;
do {
efi_char16_t filename[MAX_FILENAME_SIZE];
struct finfo fi;
unsigned long size;
void *addr;
offset = find_file_option(cmdline, cmdline_len,
optstr, optstr_size,
filename, ARRAY_SIZE(filename));
fi.filename, ARRAY_SIZE(fi.filename));
if (!offset)
break;
@ -166,7 +167,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
return status;
}
status = efi_open_file(volume, filename, &file, &size);
status = efi_open_file(volume, &fi, &file, &size);
if (status != EFI_SUCCESS)
goto err_close_volume;


@ -20,7 +20,7 @@
/* Maximum physical address for 64-bit kernel with 4-level paging */
#define MAXMEM_X86_64_4LEVEL (1ull << 46)
static efi_system_table_t *sys_table;
static efi_system_table_t *sys_table __efistub_global;
extern const bool efi_is64;
extern u32 image_offset;
@ -392,8 +392,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
image_base = efi_table_attr(image, image_base);
image_offset = (void *)startup_32 - image_base;
hdr = &((struct boot_params *)image_base)->hdr;
status = efi_allocate_pages(0x4000, (unsigned long *)&boot_params, ULONG_MAX);
if (status != EFI_SUCCESS) {
efi_printk("Failed to allocate lowmem for boot params\n");
@ -742,8 +740,15 @@ unsigned long efi_main(efi_handle_t handle,
* now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
* KASLR uses.
*
* Also relocate it if image_offset is zero, i.e. we weren't loaded by
* LoadImage, but we are not aligned correctly.
* Also relocate it if image_offset is zero, i.e. the kernel wasn't
* loaded by LoadImage, but rather by a bootloader that called the
* handover entry. The reason we must always relocate in this case is
* to handle the case of systemd-boot booting a unified kernel image,
* which is a PE executable that contains the bzImage and an initrd as
* COFF sections. The initrd section is placed after the bzImage
* without ensuring that there are at least init_size bytes available
* for the bzImage, and thus the compressed kernel's startup code may
* overwrite the initrd unless it is moved out of the way.
*/
buffer_start = ALIGN(bzimage_addr - image_offset,
@ -753,8 +758,7 @@ unsigned long efi_main(efi_handle_t handle,
if ((buffer_start < LOAD_PHYSICAL_ADDR) ||
(IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
(IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
(image_offset == 0 && !IS_ALIGNED(bzimage_addr,
hdr->kernel_alignment))) {
(image_offset == 0)) {
status = efi_relocate_kernel(&bzimage_addr,
hdr->init_size, hdr->init_size,
hdr->pref_address,


@ -2008,8 +2008,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
*/
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
return !!memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM);
if (memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM))
return true;
if (!adev->in_gpu_reset)
return false;
/*
* For all ASICs with baco/mode1 reset, the VRAM is
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
default:
return false;
}
}
/**
@ -2340,6 +2356,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)


@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
int r;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = cik_asic_pci_config_reset(adev);


@ -279,7 +279,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
(SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))


@ -1234,6 +1234,8 @@ struct amdgpu_gfxoff_quirk {
static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
{ 0, 0, 0, 0, 0 },
};


@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
ret = smu_baco_enter(smu);
if (ret)
return ret;
@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
if (ret)
return ret;
} else {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
}


@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
return amdgpu_dpm_mode2_reset(adev);
default:
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
return soc15_asic_mode1_reset(adev);
}
}


@ -765,8 +765,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
int r;
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = vi_asic_pci_config_reset(adev);


@ -3804,9 +3804,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
{
uint32_t i;
/* force the trim if mclk_switching is disabled to prevent flicker */
bool force_trim = (low_limit == high_limit);
for (i = 0; i < dpm_table->count; i++) {
/*skip the trim if od is enabled*/
if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
if ((!hwmgr->od_enabled || force_trim)
&& (dpm_table->dpm_levels[i].value < low_limit
|| dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else


@ -1718,6 +1718,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (ret)
goto out;
if (ras && ras->supported) {
ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
if (ret)
goto out;
}
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);


@ -131,6 +131,7 @@ struct kvmgt_vdev {
struct work_struct release_work;
atomic_t released;
struct vfio_device *vfio_device;
struct vfio_group *vfio_group;
};
static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
@ -183,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
cur_gfn, ret);
@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
struct vfio_group *vfio_group;
vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
if (IS_ERR_OR_NULL(vfio_group)) {
ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
goto undo_register;
}
vdev->vfio_group = vfio_group;
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
return ret;
undo_group:
vfio_group_put_external_user(vdev->vfio_group);
vdev->vfio_group = NULL;
undo_register:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
kvmgt_guest_exit(info);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
vfio_group_put_external_user(vdev->vfio_group);
vdev->kvm = NULL;
vgpu->handle = 0;
@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len, bool write)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
int idx, ret;
bool kthread = current->mm == NULL;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
if (kthread) {
if (!mmget_not_zero(kvm->mm))
return -EFAULT;
use_mm(kvm->mm);
}
idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len);
srcu_read_unlock(&kvm->srcu, idx);
if (kthread) {
unuse_mm(kvm->mm);
mmput(kvm->mm);
}
return ret;
return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
gpa, buf, len, write);
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,


@ -2940,49 +2940,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
gen8_update_reg_state_unlocked(ce, stream);
}
/**
* i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
* @stream: An i915 perf stream
* @file: An i915 perf stream file
* @buf: destination buffer given by userspace
* @count: the number of bytes userspace wants to read
* @ppos: (inout) file seek position (unused)
*
* Besides wrapping &i915_perf_stream_ops->read this provides a common place to
* ensure that if we've successfully copied any data then reporting that takes
* precedence over any internal error status, so the data isn't lost.
*
* For example ret will be -ENOSPC whenever there is more buffered data than
* can be copied to userspace, but that's only interesting if we weren't able
* to copy some data because it implies the userspace buffer is too small to
* receive a single record (and we never split records).
*
* Another case with ret == -EFAULT is more of a grey area since it would seem
* like bad form for userspace to ask us to overrun its buffer, but the user
* knows best:
*
* http://yarchive.net/comp/linux/partial_reads_writes.html
*
* Returns: The number of bytes copied or a negative error code on failure.
*/
static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
/* Note we keep the offset (aka bytes read) separate from any
* error status so that the final check for whether we return
* the bytes read with a higher precedence than any error (see
* comment below) doesn't need to be handled/duplicated in
* stream->ops->read() implementations.
*/
size_t offset = 0;
int ret = stream->ops->read(stream, buf, count, &offset);
return offset ?: (ret ?: -EAGAIN);
}
/**
* i915_perf_read - handles read() FOP for i915 perf stream FDs
* @file: An i915 perf stream file
@ -3008,7 +2965,8 @@ static ssize_t i915_perf_read(struct file *file,
{
struct i915_perf_stream *stream = file->private_data;
struct i915_perf *perf = stream->perf;
ssize_t ret;
size_t offset = 0;
int ret;
/* To ensure it's handled consistently we simply treat all reads of a
* disabled stream as an error. In particular it might otherwise lead
@ -3031,13 +2989,12 @@ static ssize_t i915_perf_read(struct file *file,
return ret;
mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file,
buf, count, ppos);
ret = stream->ops->read(stream, buf, count, &offset);
mutex_unlock(&perf->lock);
} while (ret == -EAGAIN);
} while (!offset && !ret);
} else {
mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file, buf, count, ppos);
ret = stream->ops->read(stream, buf, count, &offset);
mutex_unlock(&perf->lock);
}
@ -3048,15 +3005,15 @@ static ssize_t i915_perf_read(struct file *file,
* and read() returning -EAGAIN. Clearing the oa.pollin state here
* effectively ensures we back off until the next hrtimer callback
* before reporting another EPOLLIN event.
* The exception to this is if ops->read() returned -ENOSPC which means
* that more OA data is available than could fit in the user provided
* buffer. In this case we want the next poll() call to not block.
*/
if (ret >= 0 || ret == -EAGAIN) {
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
if (ret != -ENOSPC)
stream->pollin = false;
}
return ret;
/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
return offset ?: (ret ?: -EAGAIN);
}
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)


@ -25,6 +25,9 @@
MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
static const struct nvkm_sec2_fwif
gp108_sec2_fwif[] = {


@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
return 0;
}
MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin");
MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin");
MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
static const struct nvkm_sec2_fwif
tu102_sec2_fwif[] = {
{ 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },


@ -839,6 +839,9 @@ void vmbus_initiate_unload(bool crash)
{
struct vmbus_channel_message_header hdr;
if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
return;
/* Pre-Win2012R2 hosts don't support reconnect */
if (vmbus_proto_version < VERSION_WIN8_1)
return;


@ -11,7 +11,7 @@
#include "hyperv_vmbus.h"
struct dentry *hv_debug_root;
static struct dentry *hv_debug_root;
static int hv_debugfs_delay_get(void *data, u64 *val)
{


@ -292,7 +292,7 @@ struct vmbus_msginfo {
struct list_head msglist_entry;
/* The message itself */
unsigned char msg[0];
unsigned char msg[];
};


@ -31,6 +31,7 @@
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@ -48,14 +49,35 @@ static int hyperv_cpuhp_online;
static void *hv_panic_page;
/*
* Boolean to control whether to report panic messages over Hyper-V.
*
* It can be set via /proc/sys/kernel/hyperv/record_panic_msg
*/
static int sysctl_record_panic_msg = 1;
static int hyperv_report_reg(void)
{
return !sysctl_record_panic_msg || !hv_panic_page;
}
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
{
struct pt_regs *regs;
regs = current_pt_regs();
vmbus_initiate_unload(true);
hyperv_report_panic(regs, val);
/*
* Hyper-V should be notified only once about a panic. If
* hyperv_report_panic_msg() is going to be called later with kmsg
* data, don't do the notification here.
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
&& hyperv_report_reg()) {
regs = current_pt_regs();
hyperv_report_panic(regs, val, false);
}
return NOTIFY_DONE;
}
@ -65,7 +87,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
hyperv_report_panic(regs, val);
/*
* Hyper-V should be notified only once about a panic. If
* hyperv_report_panic_msg() is going to be called later with kmsg
* data, don't do the notification here.
*/
if (hyperv_report_reg())
hyperv_report_panic(regs, val, true);
return NOTIFY_DONE;
}
@ -1252,13 +1280,6 @@ static void vmbus_isr(void)
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
* Boolean to control whether to report panic messages over Hyper-V.
*
* It can be set via /proc/sys/kernel/hyperv/record_panic_msg
*/
static int sysctl_record_panic_msg = 1;
/*
* Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
* buffer and call into Hyper-V to transfer the data.
@ -1382,19 +1403,29 @@ static int vmbus_bus_init(void)
hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
if (hv_panic_page) {
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret)
if (ret) {
pr_err("Hyper-V: kmsg dump register "
"error 0x%x\n", ret);
hv_free_hyperv_page(
(unsigned long)hv_panic_page);
hv_panic_page = NULL;
}
} else
pr_err("Hyper-V: panic message page memory "
"allocation failed");
}
register_die_notifier(&hyperv_die_block);
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_block);
}
/*
* Always register the panic notifier because we need to unload
* the VMbus channel connection to prevent any VMbus
* activity after the VM panics.
*/
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_block);
vmbus_request_offers();
return 0;
@ -1407,7 +1438,6 @@ static int vmbus_bus_init(void)
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
hv_free_hyperv_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
return ret;
@ -2204,8 +2234,6 @@ static int vmbus_bus_suspend(struct device *dev)
vmbus_initiate_unload(false);
vmbus_connection.conn_state = DISCONNECTED;
/* Reset the event for the next resume. */
reinit_completion(&vmbus_connection.ready_for_resume_event);
@ -2289,7 +2317,6 @@ static void hv_kexec_handler(void)
{
hv_stimer_global_cleanup();
vmbus_initiate_unload(false);
vmbus_connection.conn_state = DISCONNECTED;
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
@ -2306,7 +2333,6 @@ static void hv_crash_handler(struct pt_regs *regs)
* doing the cleanup for current CPU only. This should be sufficient
* for kdump.
*/
vmbus_connection.conn_state = DISCONNECTED;
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);


@ -412,7 +412,7 @@ config SENSORS_DRIVETEMP
hard disk drives.
This driver can also be built as a module. If so, the module
will be called satatemp.
will be called drivetemp.
config SENSORS_DS620
tristate "Dallas Semiconductor DS620"


@ -264,12 +264,18 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
return err;
switch (attr) {
case hwmon_temp_input:
if (!temp_is_valid(buf[SCT_STATUS_TEMP]))
return -ENODATA;
*val = temp_from_sct(buf[SCT_STATUS_TEMP]);
break;
case hwmon_temp_lowest:
if (!temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]))
return -ENODATA;
*val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
break;
case hwmon_temp_highest:
if (!temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]))
return -ENODATA;
*val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
break;
default:


@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
data, &jc42_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);


@ -186,7 +186,7 @@ static long get_raw_temp(struct k10temp_data *data)
return temp;
}
const char *k10temp_temp_label[] = {
static const char *k10temp_temp_label[] = {
"Tctl",
"Tdie",
"Tccd1",
@ -199,12 +199,12 @@ const char *k10temp_temp_label[] = {
"Tccd8",
};
const char *k10temp_in_label[] = {
static const char *k10temp_in_label[] = {
"Vcore",
"Vsoc",
};
const char *k10temp_curr_label[] = {
static const char *k10temp_curr_label[] = {
"Icore",
"Isoc",
};


@ -21,8 +21,50 @@
#define ISL68137_VOUT_AVS 0x30
#define RAA_DMPVR2_READ_VMON 0xc8
enum versions {
enum chips {
isl68137,
isl68220,
isl68221,
isl68222,
isl68223,
isl68224,
isl68225,
isl68226,
isl68227,
isl68229,
isl68233,
isl68239,
isl69222,
isl69223,
isl69224,
isl69225,
isl69227,
isl69228,
isl69234,
isl69236,
isl69239,
isl69242,
isl69243,
isl69247,
isl69248,
isl69254,
isl69255,
isl69256,
isl69259,
isl69260,
isl69268,
isl69269,
isl69298,
raa228000,
raa228004,
raa228006,
raa228228,
raa229001,
raa229004,
};
enum variants {
raa_dmpvr1_2rail,
raa_dmpvr2_1rail,
raa_dmpvr2_2rail,
raa_dmpvr2_3rail,
@ -186,7 +228,7 @@ static int isl68137_probe(struct i2c_client *client,
memcpy(info, &raa_dmpvr_info, sizeof(*info));
switch (id->driver_data) {
case isl68137:
case raa_dmpvr1_2rail:
info->pages = 2;
info->R[PSC_VOLTAGE_IN] = 3;
info->func[0] &= ~PMBUS_HAVE_VMON;
@ -224,11 +266,47 @@ static int isl68137_probe(struct i2c_client *client,
}
static const struct i2c_device_id raa_dmpvr_id[] = {
{"isl68137", isl68137},
{"raa_dmpvr2_1rail", raa_dmpvr2_1rail},
{"raa_dmpvr2_2rail", raa_dmpvr2_2rail},
{"raa_dmpvr2_3rail", raa_dmpvr2_3rail},
{"raa_dmpvr2_hv", raa_dmpvr2_hv},
{"isl68137", raa_dmpvr1_2rail},
{"isl68220", raa_dmpvr2_2rail},
{"isl68221", raa_dmpvr2_3rail},
{"isl68222", raa_dmpvr2_2rail},
{"isl68223", raa_dmpvr2_2rail},
{"isl68224", raa_dmpvr2_3rail},
{"isl68225", raa_dmpvr2_2rail},
{"isl68226", raa_dmpvr2_3rail},
{"isl68227", raa_dmpvr2_1rail},
{"isl68229", raa_dmpvr2_3rail},
{"isl68233", raa_dmpvr2_2rail},
{"isl68239", raa_dmpvr2_3rail},
{"isl69222", raa_dmpvr2_2rail},
{"isl69223", raa_dmpvr2_3rail},
{"isl69224", raa_dmpvr2_2rail},
{"isl69225", raa_dmpvr2_2rail},
{"isl69227", raa_dmpvr2_3rail},
{"isl69228", raa_dmpvr2_3rail},
{"isl69234", raa_dmpvr2_2rail},
{"isl69236", raa_dmpvr2_2rail},
{"isl69239", raa_dmpvr2_3rail},
{"isl69242", raa_dmpvr2_2rail},
{"isl69243", raa_dmpvr2_1rail},
{"isl69247", raa_dmpvr2_2rail},
{"isl69248", raa_dmpvr2_2rail},
{"isl69254", raa_dmpvr2_2rail},
{"isl69255", raa_dmpvr2_2rail},
{"isl69256", raa_dmpvr2_2rail},
{"isl69259", raa_dmpvr2_2rail},
{"isl69260", raa_dmpvr2_2rail},
{"isl69268", raa_dmpvr2_2rail},
{"isl69269", raa_dmpvr2_3rail},
{"isl69298", raa_dmpvr2_2rail},
{"raa228000", raa_dmpvr2_hv},
{"raa228004", raa_dmpvr2_hv},
{"raa228006", raa_dmpvr2_hv},
{"raa228228", raa_dmpvr2_2rail},
{"raa229001", raa_dmpvr2_2rail},
{"raa229004", raa_dmpvr2_2rail},
{}
};


@ -384,7 +384,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
struct altr_i2c_dev *idev = NULL;
struct resource *res;
int irq, ret;
u32 val;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
if (!idev)
@ -411,17 +410,17 @@ static int altr_i2c_probe(struct platform_device *pdev)
init_completion(&idev->msg_complete);
spin_lock_init(&idev->lock);
val = device_property_read_u32(idev->dev, "fifo-size",
ret = device_property_read_u32(idev->dev, "fifo-size",
&idev->fifo_size);
if (val) {
if (ret) {
dev_err(&pdev->dev, "FIFO size set to default of %d\n",
ALTR_I2C_DFLT_FIFO_SZ);
idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
}
val = device_property_read_u32(idev->dev, "clock-frequency",
ret = device_property_read_u32(idev->dev, "clock-frequency",
&idev->bus_clk_rate);
if (val) {
if (ret) {
dev_err(&pdev->dev, "Default to 100kHz\n");
idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
}


@ -354,10 +354,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_LEAVE_SUSPENDED);
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
DPM_FLAG_LEAVE_SUSPENDED);
} else {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_LEAVE_SUSPENDED);
}
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));


@ -996,14 +996,13 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
do {
u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
if (status) {
if (status)
tegra_i2c_isr(i2c_dev->irq, i2c_dev);
if (completion_done(complete)) {
s64 delta = ktime_ms_delta(ktimeout, ktime);
if (completion_done(complete)) {
s64 delta = ktime_ms_delta(ktimeout, ktime);
return msecs_to_jiffies(delta) ?: 1;
}
return msecs_to_jiffies(delta) ?: 1;
}
ktime = ktime_get();
@ -1030,14 +1029,18 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
disable_irq(i2c_dev->irq);
/*
* There is a chance that completion may happen after IRQ
* synchronization, which is done by disable_irq().
* Under some rare circumstances (like running KASAN +
* NFS root) the CPU that handles the interrupt may get stuck
* in an uninterruptible state for a significant time. In this
* case we will get a timeout if an I2C transfer is running on
* a sibling CPU, despite the IRQ being raised.
*
* In order to handle this rare condition, the IRQ status
* needs to be checked after timeout.
*/
if (ret == 0 && completion_done(complete)) {
dev_warn(i2c_dev->dev,
"completion done after timeout\n");
ret = 1;
}
if (ret == 0)
ret = tegra_i2c_poll_completion_timeout(i2c_dev,
complete, 0);
}
return ret;
@ -1216,6 +1219,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
time_left = tegra_i2c_wait_completion_timeout(
i2c_dev, &i2c_dev->dma_complete, xfer_time);
/*
* Synchronize DMA first, since dmaengine_terminate_sync()
* performs synchronization after the transfer's termination
* and we want to get a completion if the transfer succeeded.
*/
dmaengine_synchronize(i2c_dev->msg_read ?
i2c_dev->rx_dma_chan :
i2c_dev->tx_dma_chan);
dmaengine_terminate_sync(i2c_dev->msg_read ?
i2c_dev->rx_dma_chan :
i2c_dev->tx_dma_chan);


@ -2273,19 +2273,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_scanned_device);
struct i2c_client *
i2c_new_probed_device(struct i2c_adapter *adap,
struct i2c_board_info *info,
unsigned short const *addr_list,
int (*probe)(struct i2c_adapter *adap, unsigned short addr))
{
struct i2c_client *client;
client = i2c_new_scanned_device(adap, info, addr_list, probe);
return IS_ERR(client) ? NULL : client;
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
struct i2c_adapter *i2c_get_adapter(int nr)
{
struct i2c_adapter *adapter;


@ -416,7 +416,7 @@ static const struct irq_domain_ops bcm7038_l1_domain_ops = {
.map = bcm7038_l1_map,
};
int __init bcm7038_l1_of_init(struct device_node *dn,
static int __init bcm7038_l1_of_init(struct device_node *dn,
struct device_node *parent)
{
struct bcm7038_l1_chip *intc;


@ -14,6 +14,7 @@
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
@ -3672,6 +3673,20 @@ static int its_vpe_set_affinity(struct irq_data *d,
return IRQ_SET_MASK_OK_DONE;
}
static void its_wait_vpt_parse_complete(void)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u64 val;
if (!gic_rdists->has_vpend_valid_dirty)
return;
WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
val,
!(val & GICR_VPENDBASER_Dirty),
10, 500));
}
static void its_vpe_schedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@ -3702,6 +3717,8 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
its_wait_vpt_parse_complete();
}
static void its_vpe_deschedule(struct its_vpe *vpe)
@ -3910,6 +3927,8 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
its_wait_vpt_parse_complete();
}
static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@ -4035,6 +4054,7 @@ static int its_sgi_set_affinity(struct irq_data *d,
* not on the host (since they can only be targeting a vPE).
* Tell the kernel we've done whatever it asked for.
*/
irq_data_update_effective_affinity(d, mask_val);
return IRQ_SET_MASK_OK;
}


@ -873,6 +873,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
gic_data.rdists.has_rvpeid);
gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
/* Detect non-sensical configurations */
if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
@ -893,10 +894,11 @@ static void gic_update_rdist_properties(void)
if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
gic_data.ppi_nr = 0;
pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
!gic_data.rdists.has_vlpis ? "no " : "",
!gic_data.rdists.has_direct_lpi ? "no " : "",
!gic_data.rdists.has_rvpeid ? "no " : "");
if (gic_data.rdists.has_vlpis)
pr_info("GICv4 features: %s%s%s\n",
gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
gic_data.rdists.has_rvpeid ? "RVPEID " : "",
gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
/* Check whether it's single security state view */
@ -1620,6 +1622,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
gic_data.rdists.has_vpend_valid_dirty = true;
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
