Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
	net/ipv6/ip6_tunnel.c
	net/ipv6/ip6_vti.c

ipv6 tunnel statistic bug fixes conflicting with consolidation into
generic sw per-cpu net stats.

qlogic conflict between queue counting bug fix and the addition of
multiple MAC address support.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 56a4342dfe
Documentation/block/null_blk.txt | 72 (new file)
@@ -0,0 +1,72 @@
Null block device driver
================================================================================

I. Overview

The null block device (/dev/nullb*) is used for benchmarking the various
block-layer implementations. It emulates a block device of X gigabytes in size.
The following instances are possible:

  Single-queue block-layer
    - Request-based.
    - Single submission queue per device.
    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
  Multi-queue block-layer
    - Request-based.
    - Configurable submission queues per device.
  No block-layer (known as bio-based)
    - Bio-based. IO requests are submitted directly to the device driver.
    - Directly accepts bio data structures and returns them.

All of them have a completion queue for each core in the system.

II. Module parameters applicable for all instances:

queue_mode=[0-2]: Default: 2-Multi-queue
  Selects which block-layer the module should instantiate with.

  0: Bio-based.
  1: Single-queue.
  2: Multi-queue.

home_node=[0..nr_nodes]: Default: NUMA_NO_NODE
  Selects what CPU node the data structures are allocated from.

gb=[Size in GB]: Default: 250GB
  The size of the device reported to the system.

bs=[Block size (in bytes)]: Default: 512 bytes
  The block size reported to the system.

nr_devices=[Number of devices]: Default: 2
  Number of block devices instantiated. They are instantiated as /dev/nullb0,
  etc.

irq_mode=[0-2]: Default: 1-Soft-irq
  The completion mode used for completing IOs to the block-layer.

  0: None.
  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the
     overhead when IOs are issued from another CPU node than the home node
     the device is connected to.
  2: Timer: Waits a specific period (completion_nsec) for each IO before
     completion.

completion_nsec=[ns]: Default: 10,000 ns
  Combined with irq_mode=2 (timer). The time each completion event must wait.

submit_queues=[0..nr_cpus]:
  The number of submission queues attached to the device driver. If unset, it
  defaults to 1 on single-queue and bio-based instances. For multi-queue,
  it is ignored when the use_per_node_hctx module parameter is 1.

hw_queue_depth=[0..qdepth]: Default: 64
  The hardware queue depth of the device.

III: Multi-queue specific parameters

use_per_node_hctx=[0/1]: Default: 0
  0: The number of submit queues is set to the value of the submit_queues
     parameter.
  1: The multi-queue block layer is instantiated with a hardware dispatch
     queue for each CPU node in the system.
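As a quick illustration of these parameters, a minimal sketch of loading the
driver (the values below are arbitrary, chosen only to show the syntax, not
tuned recommendations):

	modprobe null_blk queue_mode=2 gb=4 bs=4096 nr_devices=1 irq_mode=1

This instantiates one 4 GB multi-queue device at /dev/nullb0 with a 4096-byte
block size and soft-irq completions.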
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 		* atapi_dmadir: Enable ATAPI DMADIR bridge support
 
+		* disable: Disable this device.
+
 		If there are multiple matching configurations changing
 		the same attribute, the last one is used.
Documentation/module-signing.txt | 240 (new file)

@@ -0,0 +1,240 @@
==============================
KERNEL MODULE SIGNING FACILITY
==============================

CONTENTS

 - Overview.
 - Configuring module signing.
 - Generating signing keys.
 - Public keys in the kernel.
 - Manually signing modules.
 - Signed modules and stripping.
 - Loading signed modules.
 - Non-valid signatures and unsigned modules.
 - Administering/protecting the private key.


========
OVERVIEW
========

The kernel module signing facility cryptographically signs modules during
installation and then checks the signature upon loading the module. This
allows increased kernel security by disallowing the loading of unsigned modules
or modules signed with an invalid key. Module signing increases security by
making it harder to load a malicious module into the kernel. The module
signature checking is done by the kernel so that it is not necessary to have
trusted userspace bits.

This facility uses X.509 ITU-T standard certificates to encode the public keys
involved. The signatures are not themselves encoded in any industrial standard
type. The facility currently only supports the RSA public key encryption
standard (though it is pluggable and permits others to be used). The possible
hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
SHA-512 (the algorithm is selected by data in the signature).


==========================
CONFIGURING MODULE SIGNING
==========================

The module signing facility is enabled by going to the "Enable Loadable Module
Support" section of the kernel configuration and turning on

	CONFIG_MODULE_SIG	"Module signature verification"

This has a number of options available:

 (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)

     This specifies how the kernel should deal with a module that has a
     signature for which the key is not known or a module that is unsigned.

     If this is off (ie. "permissive"), then modules for which the key is not
     available and modules that are unsigned are permitted, but the kernel
     will be marked as being tainted.

     If this is on (ie. "restrictive"), only modules that have a valid
     signature that can be verified by a public key in the kernel's possession
     will be loaded. All other modules will generate an error.

     Irrespective of the setting here, if the module has a signature block
     that cannot be parsed, it will be rejected out of hand.

 (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)

     If this is on then modules will be automatically signed during the
     modules_install phase of a build. If this is off, then the modules must
     be signed manually using:

	scripts/sign-file

 (3) "Which hash algorithm should modules be signed with?"

     This presents a choice of which hash algorithm the installation phase
     will sign the modules with:

	CONFIG_SIG_SHA1		"Sign modules with SHA-1"
	CONFIG_SIG_SHA224	"Sign modules with SHA-224"
	CONFIG_SIG_SHA256	"Sign modules with SHA-256"
	CONFIG_SIG_SHA384	"Sign modules with SHA-384"
	CONFIG_SIG_SHA512	"Sign modules with SHA-512"

     The algorithm selected here will also be built into the kernel (rather
     than being a module) so that modules signed with that algorithm can have
     their signatures checked without causing a dependency loop.


=======================
GENERATING SIGNING KEYS
=======================

Cryptographic keypairs are required to generate and check signatures. A
private key is used to generate a signature and the corresponding public key
is used to check it. The private key is only needed during the build, after
which it can be deleted or stored securely. The public key gets built into
the kernel so that it can be used to check the signatures as the modules are
loaded.

Under normal conditions, the kernel build will automatically generate a new
keypair using openssl if one does not exist in the files:

	signing_key.priv
	signing_key.x509

during the building of vmlinux (the public part of the key needs to be built
into vmlinux) using parameters in the:

	x509.genkey

file (which is also generated if it does not already exist).

It is strongly recommended that you provide your own x509.genkey file.

Most notably, in the x509.genkey file, the req_distinguished_name section
should be altered from the default:

	[ req_distinguished_name ]
	O = Magrathea
	CN = Glacier signing key
	emailAddress = slartibartfast@magrathea.h2g2

The generated RSA key size can also be set with:

	[ req ]
	default_bits = 4096

It is also possible to manually generate the private/public key files using
the x509.genkey key generation configuration file in the root node of the
Linux kernel source tree and the openssl command. The following is an example
to generate the public/private key files:

	openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
	   -config x509.genkey -outform DER -out signing_key.x509 \
	   -keyout signing_key.priv
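Taken together, a minimal sketch of a complete x509.genkey might look like the
following (the organization fields are the placeholder values quoted above;
the file the build auto-generates also sets X.509 extension options not shown
here):

	[ req ]
	default_bits = 4096
	distinguished_name = req_distinguished_name
	prompt = no
	string_mask = utf8only

	[ req_distinguished_name ]
	O = Magrathea
	CN = Glacier signing key
	emailAddress = slartibartfast@magrathea.h2g2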

=========================
PUBLIC KEYS IN THE KERNEL
=========================

The kernel contains a ring of public keys that can be viewed by root. They're
in a keyring called ".system_keyring" that can be seen by:

	[root@deneb ~]# cat /proc/keys
	...
	223c7853 I------  1 perm 1f030000  0  0 keyring   .system_keyring: 1
	302d2d52 I------  1 perm 1f010000  0  0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
	...

Beyond the public key generated specifically for module signing, any file
placed in the kernel source root directory or the kernel build root directory
whose name is suffixed with ".x509" will be assumed to be an X.509 public key
and will be added to the keyring.

Further, the architecture code may take public keys from a hardware store and
add those in also (e.g. from the UEFI key database).

Finally, it is possible to add additional public keys by doing:

	keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]

e.g.:

	keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509

Note, however, that the kernel will only permit keys to be added to
.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
that is already resident in the .system_keyring at the time the key is added.


========================
MANUALLY SIGNING MODULES
========================

To manually sign a module, use the scripts/sign-file tool available in
the Linux kernel source tree. The script requires 4 arguments:

	1. The hash algorithm (e.g., sha256)
	2. The private key filename
	3. The public key filename
	4. The kernel module to be signed

The following is an example to sign a kernel module:

	scripts/sign-file sha512 kernel-signkey.priv \
		kernel-signkey.x509 module.ko

The hash algorithm used does not have to match the one configured, but if it
doesn't, you should make sure that hash algorithm is either built into the
kernel or can be loaded without requiring itself.


============================
SIGNED MODULES AND STRIPPING
============================

A signed module has a digital signature simply appended at the end. The string
"~Module signature appended~." at the end of the module's file confirms that a
signature is present, but it does not confirm that the signature is valid!

Signed modules are BRITTLE, as the signature is outside of the defined ELF
container. Thus they MAY NOT be stripped once the signature is computed and
attached. Note that the entire module is the signed payload, including any and
all debug information present at the time of signing.
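As a quick check for the marker (assuming a shell with GNU coreutils; this
only shows that a signature blob is attached, not that it verifies):

	tail -c 32 module.ko

The output should end with the appended-signature marker string if the module
has been signed.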

======================
LOADING SIGNED MODULES
======================

Modules are loaded with insmod, modprobe, init_module() or finit_module(),
exactly as for unsigned modules, as no processing is done in userspace. The
signature checking is all done within the kernel.


=========================================
NON-VALID SIGNATURES AND UNSIGNED MODULES
=========================================

If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on
the kernel command line, the kernel will only load validly signed modules
for which it has a public key. Otherwise, it will also load modules that are
unsigned. Any module for which the kernel has a key, but which proves to have
a signature mismatch, will not be permitted to load.

Any module that has an unparseable signature will be rejected.


========================================
ADMINISTERING/PROTECTING THE PRIVATE KEY
========================================

Since the private key is used to sign modules, viruses and malware could use
the private key to sign modules and compromise the operating system. The
private key must be either destroyed or moved to a secure location and not
kept in the root node of the kernel source tree.
MAINTAINERS | 16
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
 F:	arch/arm/boot/dts/sama*.dtsi
 
 ARM/CALXEDA HIGHBANK ARCHITECTURE
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-highbank/
@@ -1008,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
+F:	drivers/clk/keystone/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -3823,6 +3825,12 @@ T: git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/gspca/
 
+GUID PARTITION TABLE (GPT)
+M:	Davidlohr Bueso <davidlohr@hp.com>
+L:	linux-efi@vger.kernel.org
+S:	Maintained
+F:	block/partitions/efi.*
+
 STK1160 USB VIDEO CAPTURE DRIVER
 M:	Ezequiel Garcia <elezegarcia@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -6240,7 +6248,7 @@ F: drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@linaro.org>
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
 L:	devicetree@vger.kernel.org
 W:	http://fdt.secretlab.ca
 T:	git git://git.secretlab.ca/git/linux-2.6.git
@@ -6252,7 +6260,7 @@ K: of_get_property
 K:	of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
 M:	Pawel Moll <pawel.moll@arm.com>
 M:	Mark Rutland <mark.rutland@arm.com>
 M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -9581,7 +9589,7 @@ F: drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-M:	Dave Chinner <dchinner@fromorbit.com>
+M:	Dave Chinner <david@fromorbit.com>
 M:	Ben Myers <bpm@sgi.com>
 M:	xfs@oss.sgi.com
 L:	xfs@oss.sgi.com
Makefile | 24
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
 # Select initial ramdisk compression format, default is gzip(1).
 # This shall be used by the dracut(8) tool while creating an initramfs image.
 #
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-	INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-	INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-	INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-	INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-	INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y                  := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA)  := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ)    := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO)   := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4)   := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs (__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
@@ -87,9 +87,9 @@ gic: interrupt-controller@f1001000 {
 		interrupts = <1 9 0xf04>;
 	};
 
-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 4 0x4>;
 		#gpio-cells = <2>;
@@ -99,9 +99,9 @@ gpio0: gpio@ffc40000 {
 		interrupt-controller;
 	};
 
-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 5 0x4>;
 		#gpio-cells = <2>;
@@ -111,9 +111,9 @@ gpio1: gpio@ffc41000 {
 		interrupt-controller;
 	};
 
-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 6 0x4>;
 		#gpio-cells = <2>;
@@ -123,9 +123,9 @@ gpio2: gpio@ffc42000 {
 		interrupt-controller;
 	};
 
-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 7 0x4>;
 		#gpio-cells = <2>;
@@ -135,9 +135,9 @@ gpio3: gpio@ffc43000 {
 		interrupt-controller;
 	};
 
-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 8 0x4>;
 		#gpio-cells = <2>;
@@ -147,9 +147,9 @@ gpio4: gpio@ffc44000 {
 		interrupt-controller;
 	};
 
-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 9 0x4>;
 		#gpio-cells = <2>;
@@ -241,7 +241,7 @@ pfc: pfc@e6060000 {
 
 	sdhi0: sdhi@ee100000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee100000 0 0x100>;
+		reg = <0 0xee100000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 165 4>;
 		cap-sd-highspeed;
@@ -250,7 +250,7 @@ sdhi0: sdhi@ee100000 {
 
 	sdhi1: sdhi@ee120000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee120000 0 0x100>;
+		reg = <0 0xee120000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 166 4>;
 		cap-sd-highspeed;
|
||||
|
||||
static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
|
||||
{
|
||||
int res;
|
||||
|
||||
/* LCD enable GPIO */
|
||||
ldp_lcd_pdata.enable_gpio = gpio + 7;
|
||||
|
||||
/* Backlight enable GPIO */
|
||||
ldp_lcd_pdata.backlight_gpio = gpio + 15;
|
||||
|
||||
res = platform_device_register(&ldp_lcd_device);
|
||||
if (res)
|
||||
pr_err("Unable to register LCD: %d\n", res);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
|
||||
|
||||
static struct platform_device *ldp_devices[] __initdata = {
|
||||
&ldp_gpio_keys_device,
|
||||
&ldp_lcd_device,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_OMAP_MUX
|
||||
|
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
 	{ "dss_hdmi", "omapdss_hdmi", -1 },
 };
 
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+	u32 enable_mask, enable_shift;
+	u32 pipd_mask, pipd_shift;
+	u32 reg;
+
+	if (dsi_id == 0) {
+		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI1_PIPD_MASK;
+		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+	} else if (dsi_id == 1) {
+		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI2_PIPD_MASK;
+		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+	} else {
+		return -ENODEV;
+	}
+
+	reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	reg &= ~enable_mask;
+	reg &= ~pipd_mask;
+
+	reg |= (lanes << enable_shift) & enable_mask;
+	reg |= (lanes << pipd_shift) & pipd_mask;
+
+	omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
 	return 0;
 }
 
 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		omap4_dsi_mux_pads(dsi_id, 0);
 }
 
 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-	{ .irq = 52 },
+	{ .irq = 52 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-	{ .irq = 24 },
+	{ .irq = 24 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-	{ .irq = 28 },
+	{ .irq = 28 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
 	.class		= &dra7xx_uart_hwmod_class,
 	.clkdm_name	= "l4per_clkdm",
 	.main_clk	= "uart1_gfclk_mux",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS	PXA_CS3_PHYS
 
 #define LUBBOCK_FPGA_PHYS	PXA_CS2_PHYS
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 	panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+	samsung_wdt_reset_of_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 	.dt_compat	= s3c64xx_dt_compat,
 	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
 	.init_machine	= s3c64xx_dt_init_machine,
 	.restart	= s3c64xx_dt_restart,
 MACHINE_END
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
 	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };
 
+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
 	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
 
 	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
 				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
 
 	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
 	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
 			.id		= i,
 			.data		= &rsnd_card_info[i],
 			.size_data	= sizeof(struct asoc_simple_card_info),
-			.dma_mask	= ~0,
+			.dma_mask	= DMA_BIT_MASK(32),
 		};
 
 		platform_device_register_full(&cardinfo);
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 	lager_add_standard_devices();
 
-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, xen_hvm_resume_frames);
+		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
 
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
-			return -EINVAL;
-		}
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
+			return -EINVAL;
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
-	attr->disabled	= disabled;
 
 	return 0;
 }
@@ -58,7 +58,6 @@ soc@80000000 {
 		compatible = "fsl,mpc5121-immr";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		#interrupt-cells = <2>;
 		ranges = <0x0 0x80000000 0x400000>;
 		reg = <0x80000000 0x400000>;
 		bus-frequency = <66000000>;	// 66 MHz ips bus
@@ -189,6 +188,10 @@ ioctl@a000 {
 			reg = <0xA000 0x1000>;
 		};
 
+		// disable USB1 port
+		// TODO:
+		// correct pinmux config and fix USB3320 ulpi dependency
+		// before re-enabling it
 		usb@3000 {
 			compatible = "fsl,mpc5121-usb2-dr";
 			reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@ usb@3000 {
 			interrupts = <43 0x8>;
 			dr_mode = "host";
 			phy_type = "ulpi";
+			status = "disabled";
 		};
 
 		// 5125 PSCs are not 52xx or 5121 PSC compatible
@@ -284,7 +284,7 @@ do_kvm_##n:								\
 	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
 	beq-	1f;							   \
 	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
-1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
+1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
 	blt+	cr1,3f;			/* abort if it is		*/ \
 	li	r1,(n);			/* will be reloaded later	*/ \
 	sth	r1,PACA_TRAP_SAVE(r13);					   \
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
@@ -4,13 +4,18 @@
 #ifdef __KERNEL__
 
 /*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
  */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
 
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+#else
 #define get_unaligned	__get_unaligned_be
 #define put_unaligned	__put_unaligned_be
+#endif
 
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_UNALIGNED_H */
@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
  * of the function that the cpu should jump to to continue
  * initialization.
  */
+	.balign 8
 	.globl	__secondary_hold_spinloop
 __secondary_hold_spinloop:
 	.llong	0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
 	mtctr	r8
 	bctr
 
+.balign 8
 p_end:	.llong	_end - _stext
 
 4:	/* Now copy the rest of the kernel up to _end */
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 	/*
 	 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 		p = vc->stolen_tb;
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 }
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
-			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
+	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync				/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
-	/* order napping_threads update vs testing entry_exit_count */
-	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
+	/* order napping_threads update vs testing entry_exit_count */
+	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
+	/* Re-enable interrupts */
+	ld	r3, HSTATE_HOST_MSR(r13)
+	ori	r3, r3, MSR_EE
+	MTMSR_EERI(r3)
+
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
 
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr  = vcpu->arch.lr;
 	svcpu->pc  = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar   = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst   = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
@@ -9,6 +9,14 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define sLd sld		/* Shift towards low-numbered address. */
+#define sHd srd		/* Shift towards high-numbered address. */
+#else
+#define sLd srd		/* Shift towards low-numbered address. */
+#define sHd sld		/* Shift towards high-numbered address. */
+#endif
+
 	.align	7
 _GLOBAL(__copy_tofrom_user)
 BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 24:	ld	r9,0(r4)	/* 3+2n loads, 2+2n stores */
 25:	ld	r0,8(r4)
-	sld	r6,r9,r10
+	sLd	r6,r9,r10
 26:	ldu	r9,16(r4)
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	or	r7,r7,r6
 	blt	cr6,79f
 27:	ld	r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 28:	ld	r0,0(r4)	/* 4+2n loads, 3+2n stores */
 29:	ldu	r9,8(r4)
-	sld	r8,r0,r10
+	sLd	r8,r0,r10
 	addi	r3,r3,-8
 	blt	cr6,5f
 30:	ld	r0,8(r4)
-	srd	r12,r9,r11
-	sld	r6,r9,r10
+	sHd	r12,r9,r11
+	sLd	r6,r9,r10
 31:	ldu	r9,16(r4)
 	or	r12,r8,r12
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	addi	r3,r3,16
 	beq	cr6,78f
 
 1:	or	r7,r7,r6
 32:	ld	r0,8(r4)
 76:	std	r12,8(r3)
-2:	srd	r12,r9,r11
-	sld	r6,r9,r10
+2:	sHd	r12,r9,r11
+	sLd	r6,r9,r10
 33:	ldu	r9,16(r4)
 	or	r12,r8,r12
 77:	stdu	r7,16(r3)
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	bdnz	1b
 
 78:	std	r12,8(r3)
 	or	r7,r7,r6
 79:	std	r7,16(r3)
-5:	srd	r12,r9,r11
+5:	sHd	r12,r9,r11
 	or	r12,r8,r12
 80:	std	r12,24(r3)
 	bne	6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr
 6:	cmpwi	cr1,r5,8
 	addi	r3,r3,32
-	sld	r9,r9,r10
+	sLd	r9,r9,r10
 	ble	cr1,7f
 34:	ld	r0,8(r4)
-	srd	r7,r0,r11
+	sHd	r7,r0,r11
 	or	r9,r7,r9
 7:
 	bf	cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,32
+#endif
 94:	stw	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,32
+#endif
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,16
+#endif
 95:	sth	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,16
+#endif
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,8
+#endif
 96:	stb	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,8
+#endif
 3:	li	r3,0
 	blr
 
@ -36,7 +36,6 @@
#include "powernv.h"
#include "pci.h"

static char *hub_diag = NULL;
static int ioda_eeh_nb_init = 0;

static int ioda_eeh_event(struct notifier_block *nb,
@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
ioda_eeh_nb_init = 1;
}

/* We needn't HUB diag-data on PHB3 */
if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!hub_diag) {
pr_err("%s: Out of memory !\n", __func__);
return -ENOMEM;
}
}

#ifdef CONFIG_DEBUG_FS
if (phb->dbgfs) {
debugfs_create_file("err_injct_outbound", 0600,
@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
struct OpalIoP7IOCErrorData *data;
struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
long rc;

data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
__func__, phb->hub_id, rc);
@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
struct OpalIoPhbErrorCommon *common;
long rc;

common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}

common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
switch (common->ioType) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
ioda_eeh_p7ioc_phb_diag(hose, common);

@ -172,11 +172,13 @@ struct pnv_phb {
} ioda;
};

/* PHB status structure */
/* PHB and hub status structure */
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
struct OpalIoP7IOCErrorData hub_diag;
} diag;

};

extern struct pci_ops pnv_pci_ops;

@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
checksum.o strlen.o div64.o div64-generic.o

# Extracted from libgcc
lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
udiv_qrnnd.o

@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(pte_t a)
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_val(a) & _PAGE_VALID;
}
@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
* and SUN4V pte layout, so this inline test is fine.
*/
if (likely(mm != &init_mm) && pte_accessible(orig))
if (likely(mm != &init_mm) && pte_accessible(mm, orig))
tlb_batch_add(mm, addr, ptep, orig, fullmm);
}

@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_flags(a) & _PAGE_PRESENT;
if (pte_flags(a) & _PAGE_PRESENT)
return true;

if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
mm_tlb_flush_pending(mm))
return true;

return false;
}

static inline int pte_hidden(pte_t pte)

@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}

if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
if (c->x86 == 6 && cpu_has_clflush &&
(c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64

@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
pte_t pte = gup_get_pte(ptep);
struct page *page;

/* Similar to the PMD case, NUMA hinting must take slow path */
if (pte_numa(pte)) {
pte_unmap(ptep);
return 0;
}

if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_large(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
if (pmd_numa(pmd))
return 0;
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {

@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
void blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
int i, j;

queue_for_each_hw_ctx(q, hctx, i) {
hctx_for_each_ctx(hctx, ctx, j) {
kobject_del(&ctx->kobj);
kobject_put(&ctx->kobj);
}
kobject_del(&hctx->kobj);
kobject_put(&hctx->kobj);
}

kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
kobject_put(&q->mq_kobj);

kobject_put(&disk_to_dev(disk)->kobj);
}

@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
depends on X86_MCE && X86_LOCAL_APIC
select EFI
select UEFI_CPER
default n
help

@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
{ "INT33B2", },
{ "INT33FC", },

{ "INT3430", (unsigned long)&lpt_dev_desc },
{ "INT3431", (unsigned long)&lpt_dev_desc },

@ -2,7 +2,6 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
select EFI
select UEFI_CPER
depends on X86
help

@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
.name = "erst",
.flags = PSTORE_FLAGS_FRAGILE,
.open = erst_open_pstore,
.close = erst_close_pstore,
.read = erst_reader,

@ -1238,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;

/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;

if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
@ -1263,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}

/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;

hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;

@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
/*
* set PHY Paremeters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
* is 0x07fffffd, and the other one write for setting
* is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
*/
regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
| IMX6Q_GPR13_SATA_TX_BOOST_MASK
| IMX6Q_GPR13_SATA_TX_LVL_MASK
| IMX6Q_GPR13_SATA_MPLL_CLK_EN
| IMX6Q_GPR13_SATA_TX_EDGE_RATE
, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M

@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to get NCQ Send/Recv Log Emask 0x%x\n",
err_mask);
} else {
u8 *cmds = dev->ncq_send_recv_cmds;

dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
ATA_LOG_NCQ_SEND_RECV_SIZE);
memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
ata_dev_dbg(dev, "disabling queued TRIM support\n");
cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
}
}
}

@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },

/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },

/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },

/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },

/* End Marker */
{ }
};
@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
{ "disable", .horkage_on = ATA_HORKAGE_DISABLE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;

@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}

/*
* XXX - UGLY HACK
*
* The block layer suspend/resume path is fundamentally broken due
* to freezable kthreads and workqueue and may deadlock if a block
* device gets removed while resume is in progress. I don't know
* what the solution is short of removing freezable kthreads and
* workqueues altogether.
*
* The following is an ugly hack to avoid kicking off device
* removal while freezer is active. This is a joke but does avoid
* this particular deadlock scenario.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=62801
* http://marc.info/?l=linux-kernel&m=138695698516487
*/
#ifdef CONFIG_FREEZER
while (pm_freezing)
msleep(10);
#endif

DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);

@ -1,4 +1,5 @@
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
@ -65,7 +66,7 @@ enum {
NULL_Q_MQ = 2,
};

static int submit_queues = 1;
static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = true;
static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)

static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
hctx_index);
int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
int tip = (reg->nr_hw_queues % nr_online_nodes);
int node = 0, i, n;

/*
* Split submit queues evenly wrt to the number of nodes. If uneven,
* fill the first buckets with one extra, until the rest is filled with
* no extra.
*/
for (i = 0, n = 1; i < hctx_index; i++, n++) {
if (n % b_size == 0) {
n = 0;
node++;

tip--;
if (!tip)
b_size = reg->nr_hw_queues / nr_online_nodes;
}
}

/*
* A node might not be online, therefore map the relative node id to the
* real node id.
*/
for_each_online_node(n) {
if (!node)
break;
node--;
}

return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}

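Aside (not part of the patch): the bucketing loop above can be read in isolation. The sketch below mirrors it in plain C — queue_to_node() and its parameters are hypothetical names, only DIV_ROUND_UP() mirrors the kernel macro — distributing nr_queues over nr_nodes with the first nr_queues % nr_nodes nodes taking one extra queue:

#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical stand-alone version of the bucketing loop above. */
static int queue_to_node(int queue_index, int nr_queues, int nr_nodes)
{
	int b_size = DIV_ROUND_UP(nr_queues, nr_nodes);
	int tip = nr_queues % nr_nodes;
	int node = 0, i, n;

	for (i = 0, n = 1; i < queue_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;

			tip--;
			if (!tip)	/* extras exhausted: shrink bucket */
				b_size = nr_queues / nr_nodes;
		}
	}
	return node;
}

int main(void)
{
	/* 10 queues over 4 nodes -> buckets of 3, 3, 2, 2 */
	for (int q = 0; q < 10; q++)
		printf("queue %d -> node %d\n", q, queue_to_node(q, 10, 4));
	return 0;
}
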
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
kfree(hctx);
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
BUG_ON(!nullb);
BUG_ON(!nq);

init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int index)
{
struct nullb *nullb = data;
struct nullb_queue *nq = &nullb->queues[index];

init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
nullb->nr_queues++;
hctx->driver_data = nq;
null_init_queue(nullb, nq);
nullb->nr_queues++;

return 0;
}
@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)

nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
return 1;
return -ENOMEM;

tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
return 1;
return -ENOMEM;
}

for (i = 0; i < nq->queue_depth; i++) {
@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)

static int setup_queues(struct nullb *nullb)
{
struct nullb_queue *nq;
int i;

nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return 1;
return -ENOMEM;

nullb->nr_queues = 0;
nullb->queue_depth = hw_queue_depth;

if (queue_mode == NULL_Q_MQ)
return 0;
return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
struct nullb_queue *nq;
int i, ret = 0;

for (i = 0; i < submit_queues; i++) {
nq = &nullb->queues[i];
init_waitqueue_head(&nq->wait);
nq->queue_depth = hw_queue_depth;
if (setup_commands(nq))
break;

null_init_queue(nullb, nq);

ret = setup_commands(nq);
if (ret)
goto err_queue;
nullb->nr_queues++;
}

if (i == submit_queues)
return 0;

return 0;
err_queue:
cleanup_queues(nullb);
return 1;
return ret;
}

static int null_add_dev(void)
@ -518,11 +560,13 @@ static int null_add_dev(void)
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
blk_queue_make_request(nullb->q, null_queue_bio);
init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
if (nullb->q)
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
init_driver_queues(nullb);
}

if (!nullb->q)
@ -579,7 +623,13 @@ static int __init null_init(void)
}
#endif

if (submit_queues > nr_cpu_ids)
if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.",
nr_online_nodes);
submit_queues = nr_online_nodes;
}
} else if (submit_queues > nr_cpu_ids)
submit_queues = nr_cpu_ids;
else if (!submit_queues)
submit_queues = 1;

@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}

const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
switch (state) {
case SKD_MSG_STATE_IDLE:
@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
}
}

const char *skd_skreq_state_to_str(enum skd_req_state state)
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
case SKD_REQ_STATE_IDLE:

@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0489, 0xe057) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
@ -132,6 +133,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },

@ -155,6 +155,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },

@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;

memcpy(&new_policy, policy, sizeof(*policy));

/* Use the default policy if its valid. */
if (cpufreq_driver->setpolicy)
cpufreq_parse_governor(policy->governor->name,
&new_policy.policy, NULL);

/* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;

@ -845,8 +851,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev,
bool frozen)
unsigned int cpu, struct device *dev)
{
int ret = 0;
unsigned long flags;
@ -877,11 +882,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}

/* Don't touch sysfs links during light-weight init */
if (!frozen)
ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

return ret;
return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif

@ -926,6 +927,27 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
struct kobject *kobj;
struct completion *cmp;

down_read(&policy->rwsem);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
up_read(&policy->rwsem);
kobject_put(kobj);

/*
* We need to make sure that the underlying kobj is
* actually not referenced anymore by anybody before we
* proceed with unloading.
*/
pr_debug("waiting for dropping of refcount\n");
wait_for_completion(cmp);
pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
free_cpumask_var(policy->related_cpus);
@ -986,7 +1008,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
up_read(&cpufreq_rwsem);
return ret;
}
@ -1096,7 +1118,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
if (frozen)
cpufreq_policy_put_kobj(policy);
cpufreq_policy_free(policy);

nomem_out:
up_read(&cpufreq_rwsem);

@ -1118,7 +1143,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
unsigned int old_cpu, bool frozen)
unsigned int old_cpu)
{
struct device *cpu_dev;
int ret;
@ -1126,10 +1151,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
/* first sibling now owns the new sysfs dir */
cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

/* Don't touch sysfs files during light-weight tear-down */
if (frozen)
return cpu_dev->id;

sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
@ -1196,7 +1217,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);

@ -1218,8 +1239,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
int ret;
unsigned long flags;
struct cpufreq_policy *policy;
struct kobject *kobj;
struct completion *cmp;

read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
@ -1249,22 +1268,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}

if (!frozen) {
down_read(&policy->rwsem);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
up_read(&policy->rwsem);
kobject_put(kobj);

/*
* We need to make sure that the underlying kobj is
* actually not referenced anymore by anybody before we
* proceed with unloading.
*/
pr_debug("waiting for dropping of refcount\n");
wait_for_completion(cmp);
pr_debug("wait complete\n");
}
if (!frozen)
cpufreq_policy_put_kobj(policy);

/*
* Perform the ->exit() even during light-weight tear-down,

@ -62,6 +62,7 @@ config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
select DMA_ENGINE_RAID
select DCA
help
Enable support for the Intel(R) I/OAT DMA engine present
@ -112,6 +113,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
select DMA_ENGINE
select DMA_ENGINE_RAID
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
@ -352,6 +355,7 @@ config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
@ -377,4 +381,7 @@ config DMATEST
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.

config DMA_ENGINE_RAID
bool

endif

@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
return chan->dev->device.parent;
}

#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)

@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
__UNMAP_POOL(16),
__UNMAP_POOL(128),
__UNMAP_POOL(256),
@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
dma_cookie_t cookie;
unsigned long flags;

unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;

@ -539,9 +539,9 @@ static int dmatest_func(void *data)

um->len = params->buf_size;
for (i = 0; i < src_cnt; i++) {
unsigned long buf = (unsigned long) thread->srcs[i];
void *buf = thread->srcs[i];
struct page *pg = virt_to_page(buf);
unsigned pg_off = buf & ~PAGE_MASK;
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
@ -559,9 +559,9 @@ static int dmatest_func(void *data)
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
dsts = &um->addr[src_cnt];
for (i = 0; i < dst_cnt; i++) {
unsigned long buf = (unsigned long) thread->dsts[i];
void *buf = thread->dsts[i];
struct page *pg = virt_to_page(buf);
unsigned pg_off = buf & ~PAGE_MASK;
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);

@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;

snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;

snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
dma_cookie_t cookie;
dma_cookie_t cookie = -EINVAL;

spin_lock_irqsave(&chan->desc_lock, flags);

@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
struct dma_async_tx_descriptor *txd = &desc->async_tx;
struct device *dev = chan->common.device->dev;
dma_addr_t src = get_desc_src(chan, desc);
dma_addr_t dst = get_desc_dst(chan, desc);
u32 len = get_desc_cnt(chan, desc);

/* Run the link descriptor callback function */
if (txd->callback) {

@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
return hw_desc->phy_dest_addr;
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/*
* Perform a transaction to verify the HW works.
*/
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
int err = 0;

src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;

dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}

/* Fill in src buffer */
for (i = 0; i < MV_XOR_TEST_SIZE; i++)
for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;

dma_chan = &mv_chan->dmachan;
@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto out;
}

dest_dma = dma_map_single(dma_chan->device->dev, dest,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
if (!unmap) {
err = -ENOMEM;
goto free_resources;
}

src_dma = dma_map_single(dma_chan->device->dev, src,
MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
PAGE_SIZE, DMA_TO_DEVICE);
unmap->to_cnt = 1;
unmap->addr[0] = src_dma;

dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
PAGE_SIZE, DMA_FROM_DEVICE);
unmap->from_cnt = 1;
unmap->addr[1] = dest_dma;

unmap->len = PAGE_SIZE;

tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
MV_XOR_TEST_SIZE, 0);
PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}

dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
PAGE_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}

free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
int src_count = MV_XOR_NUM_SRC_TEST;

for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}

/* Fill in src buffers */
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}

for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);

cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
goto out;
}

/* test xor */
dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
GFP_KERNEL);
if (!unmap) {
err = -ENOMEM;
goto free_resources;
}

for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
/* test xor */
for (i = 0; i < src_count; i++) {
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
dma_srcs[i] = unmap->addr[i];
unmap->to_cnt++;
}

unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
dest_dma = unmap->addr[src_count];
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;

tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
src_count, PAGE_SIZE, 0);

cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}

free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
src_idx = MV_XOR_NUM_SRC_TEST;
src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
int i = 0;

for_each_child_of_node(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;

@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}

xordev->channels[i] =
mv_xor_channel_add(xordev, pdev, i,
cap_mask, irq);
if (IS_ERR(xordev->channels[i])) {
ret = PTR_ERR(xordev->channels[i]);
xordev->channels[i] = NULL;
chan = mv_xor_channel_add(xordev, pdev, i,
cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
goto err_channel_add;
}

xordev->channels[i] = chan;
i++;
}
} else if (pdata && pdata->channels) {
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_channel_data *cd;
struct mv_xor_chan *chan;
int irq;

cd = &pdata->channels[i];
@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}

xordev->channels[i] =
mv_xor_channel_add(xordev, pdev, i,
cd->cap_mask, irq);
if (IS_ERR(xordev->channels[i])) {
ret = PTR_ERR(xordev->channels[i]);
chan = mv_xor_channel_add(xordev, pdev, i,
cd->cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
goto err_channel_add;
}

xordev->channels[i] = chan;
}
}

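All three self-test conversions in this file follow the same shape: size a dmaengine_unmap_data for every mapping, record each dma_map_page() result in unmap->addr[], count it in to_cnt/from_cnt, and release the lot with a single dmaengine_unmap_put(). A condensed, hedged sketch of that pattern — example_map_one_copy() is an invented name, and error handling for failed mappings is omitted:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper showing the unmap-data bookkeeping above. */
static int example_map_one_copy(struct device *dev, struct page *src,
				struct page *dst)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
	if (!unmap)
		return -ENOMEM;

	/* one source page ... */
	unmap->addr[0] = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;

	/* ... and one destination page */
	unmap->addr[1] = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	/* ... prep, submit and wait for the transfer here ... */

	dmaengine_unmap_put(unmap);	/* unmaps both pages in one call */
	return 0;
}
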
@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)

static inline void _init_desc(struct dma_pl330_desc *desc)
{
desc->pchan = NULL;
desc->req.x = &desc->px;
desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
desc->rqcfg.privileged = 0;
desc->rqcfg.insnaccess = 0;
desc->rqcfg.scctl = SCCTRL0;
desc->rqcfg.dcctl = DCCTRL0;
desc->req.cfg = &desc->rqcfg;
@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;

desc = kmalloc(count * sizeof(*desc), flg);
desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;

@ -532,29 +532,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
* ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
*/
static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
int value, unsigned long flags)
{
struct dma_cdb *hw_desc = desc->hw_desc;

memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
desc->hw_next = NULL;
desc->src_cnt = 1;
desc->dst_cnt = 1;

if (flags & DMA_PREP_INTERRUPT)
set_bit(PPC440SPE_DESC_INT, &desc->flags);
else
clear_bit(PPC440SPE_DESC_INT, &desc->flags);

hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
hw_desc->opc = DMA_CDB_OPC_DFILL128;
}

/**
* ppc440spe_desc_set_src_addr - set source address into the descriptor
*/
@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
struct ppc440spe_adma_chan *chan,
dma_cookie_t cookie)
{
int i;

BUG_ON(desc->async_tx.cookie < 0);
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_prep_dma_interrupt;
}
pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
"( %s%s%s%s%s%s%s)\n",
"( %s%s%s%s%s%s)\n",
dev_name(adev->dev),
dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",

@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct txx9dmac_slave *ds = dc->chan.private;

dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);

@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
.cmd_per_lun = 1,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
.no_write_same = 1,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");

@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o

obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/

@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.

config UEFI_CPER
def_bool n

endmenu

config UEFI_CPER
bool

@ -1,7 +1,7 @@
#
# Makefile for linux kernel
#
obj-y += efi.o vars.o
obj-$(CONFIG_EFI) += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o

@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
.flags = PSTORE_FLAGS_FRAGILE,
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,

@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
u32 completed_seqno;
u32 acthd;
u32 completed_seqno = ring->get_seqno(ring, false);
u32 acthd = intel_ring_get_active_head(ring);
struct drm_i915_gem_request *request;

acthd = intel_ring_get_active_head(ring);
completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
continue;

i915_set_reset_status(ring, request, acthd);
}
}

static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;

@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);

if (request->seqno > completed_seqno)
i915_set_reset_status(ring, request, acthd);

i915_gem_free_request(request);
}

@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
struct intel_ring_buffer *ring;
int i;

/*
* Before we free the objects from the requests, we need to inspect
* them for finding the guilty party. As the requests only borrow
* their reference to the objects, the inspection must be done first.
*/
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
i915_gem_reset_ring_status(dev_priv, ring);

for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);

i915_gem_cleanup_ringbuffer(dev);

@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
{
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret = 0;
int i, ret;

INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto out;
goto err;
}

if (!list_empty(&obj->obj_exec_link)) {
@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto out;
goto err;
}

drm_gem_object_reference(&obj->base);
@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
spin_unlock(&file->table_lock);

i = 0;
list_for_each_entry(obj, &objects, obj_exec_link) {
while (!list_empty(&objects)) {
struct i915_vma *vma;

obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);

/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
goto out;
goto err;
}

/* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
list_del_init(&obj->obj_exec_link);

vma->exec_entry = &exec[i];
if (eb->and < 0) {
@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
++i;
}

return 0;

out:

err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
if (ret)
drm_gem_object_unreference(&obj->base);
drm_gem_object_unreference(&obj->base);
}
/*
* Objects already transfered to the vmas list will be unreferenced by
* eb_destroy.
*/

return ret;
}

@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
|
||||
uint32_t val;
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
|
||||
WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
|
||||
WARN(crtc->active, "CRTC for pipe %c enabled\n",
|
||||
pipe_name(crtc->pipe));
|
||||
|
||||
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
|
||||
@ -11126,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
|
||||
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
|
||||
u16 gmch_ctrl;
|
||||
|
||||
pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
|
||||
pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
|
||||
if (state)
|
||||
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
|
||||
else
|
||||
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
|
||||
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
|
||||
pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
|
||||
unsigned long irqflags;
|
||||
uint32_t tmp;
|
||||
|
||||
WARN_ON(dev_priv->pc8.enabled);
|
||||
|
||||
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
|
||||
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
|
||||
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
|
||||
@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
|
||||
static void __intel_power_well_get(struct drm_device *dev,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
if (!power_well->count++)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!power_well->count++) {
|
||||
hsw_disable_package_c8(dev_priv);
|
||||
__intel_set_power_well(dev, true);
|
||||
}
|
||||
}
|
||||
|
||||
static void __intel_power_well_put(struct drm_device *dev,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
WARN_ON(!power_well->count);
|
||||
if (!--power_well->count && i915_disable_power_well)
|
||||
if (!--power_well->count && i915_disable_power_well) {
|
||||
__intel_set_power_well(dev, false);
|
||||
hsw_enable_package_c8(dev_priv);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_display_power_get(struct drm_device *dev,
|
||||
|
@ -8,5 +8,6 @@ config DRM_QXL
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_KMS_FB_HELPER
|
||||
select DRM_TTM
|
||||
select CRC32
|
||||
help
|
||||
QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
|
||||
|
@ -24,7 +24,7 @@
|
||||
*/
|
||||
|
||||
|
||||
#include "linux/crc32.h"
|
||||
#include <linux/crc32.h>
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
|
||||
if (sad_count < 0) {
|
||||
if (sad_count <= 0) {
|
||||
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
|
||||
return;
|
||||
}
|
||||
@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
|
||||
if (sad_count < 0) {
|
||||
if (sad_count <= 0) {
|
||||
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
|
||||
return;
|
||||
}
|
||||
@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
|
||||
rdev->audio.enabled = true;
|
||||
|
||||
if (ASIC_IS_DCE8(rdev))
|
||||
rdev->audio.num_pins = 7;
|
||||
rdev->audio.num_pins = 6;
|
||||
else if (ASIC_IS_DCE61(rdev))
|
||||
rdev->audio.num_pins = 4;
|
||||
else
|
||||
rdev->audio.num_pins = 6;
|
||||
|
||||
|
@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
|
||||
if (sad_count < 0) {
|
||||
if (sad_count <= 0) {
|
||||
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
|
||||
return;
|
||||
}
|
||||
@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
|
||||
if (sad_count < 0) {
|
||||
if (sad_count <= 0) {
|
||||
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
|
||||
return;
|
||||
}
|
||||
|
@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
||||
(rdev->pdev->device == 0x999C)) {
|
||||
rdev->config.cayman.max_simds_per_se = 6;
|
||||
rdev->config.cayman.max_backends_per_se = 2;
|
||||
rdev->config.cayman.max_hw_contexts = 8;
|
||||
rdev->config.cayman.sx_max_export_size = 256;
|
||||
rdev->config.cayman.sx_max_export_pos_size = 64;
|
||||
rdev->config.cayman.sx_max_export_smx_size = 192;
|
||||
} else if ((rdev->pdev->device == 0x9903) ||
|
||||
(rdev->pdev->device == 0x9904) ||
|
||||
(rdev->pdev->device == 0x990A) ||
|
||||
@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
||||
(rdev->pdev->device == 0x999D)) {
|
||||
rdev->config.cayman.max_simds_per_se = 4;
|
||||
rdev->config.cayman.max_backends_per_se = 2;
|
||||
rdev->config.cayman.max_hw_contexts = 8;
|
||||
rdev->config.cayman.sx_max_export_size = 256;
|
||||
rdev->config.cayman.sx_max_export_pos_size = 64;
|
||||
rdev->config.cayman.sx_max_export_smx_size = 192;
|
||||
} else if ((rdev->pdev->device == 0x9919) ||
|
||||
(rdev->pdev->device == 0x9990) ||
|
||||
(rdev->pdev->device == 0x9991) ||
|
||||
@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
||||
(rdev->pdev->device == 0x99A0)) {
|
||||
rdev->config.cayman.max_simds_per_se = 3;
|
||||
rdev->config.cayman.max_backends_per_se = 1;
|
||||
rdev->config.cayman.max_hw_contexts = 4;
|
||||
rdev->config.cayman.sx_max_export_size = 128;
|
||||
rdev->config.cayman.sx_max_export_pos_size = 32;
|
||||
rdev->config.cayman.sx_max_export_smx_size = 96;
|
||||
} else {
|
||||
rdev->config.cayman.max_simds_per_se = 2;
|
||||
rdev->config.cayman.max_backends_per_se = 1;
|
||||
rdev->config.cayman.max_hw_contexts = 4;
|
||||
rdev->config.cayman.sx_max_export_size = 128;
|
||||
rdev->config.cayman.sx_max_export_pos_size = 32;
|
||||
rdev->config.cayman.sx_max_export_smx_size = 96;
|
||||
}
|
||||
rdev->config.cayman.max_texture_channel_caches = 2;
|
||||
rdev->config.cayman.max_gprs = 256;
|
||||
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	rdev->config.cayman.max_gs_threads = 32;
 	rdev->config.cayman.max_stack_entries = 512;
 	rdev->config.cayman.sx_num_of_sets = 8;
-	rdev->config.cayman.sx_max_export_size = 256;
-	rdev->config.cayman.sx_max_export_pos_size = 64;
-	rdev->config.cayman.sx_max_export_smx_size = 192;
-	rdev->config.cayman.max_hw_contexts = 8;
 	rdev->config.cayman.sq_num_cf_insts = 2;
 
 	rdev->config.cayman.sc_prim_fifo_size = 0x40;
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
 						       ASIC_INTERNAL_MEMORY_SS, 0);
 
+	/* disable ss, causes hangs on some cayman boards */
+	if (rdev->family == CHIP_CAYMAN) {
+		pi->sclk_ss = false;
+		pi->mclk_ss = false;
+	}
+
 	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
 	else
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
 	}
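One subtlety in the ttm_bo_move_memcpy() hunk above: a TTM whose pages were swapped out is still in the tt_unpopulated state, yet its contents do exist (on swap) and must not be replaced by zeroes. A minimal sketch of the corrected predicate, assuming the 3.13-era struct ttm_tt fields; this is an illustration, not the kernel's function:

	/* Data is truly nonexistent only when there is no io memory and
	 * either no ttm at all, or a ttm that was never populated and
	 * was not swapped out either. Only then is clearing the
	 * destination with memset_io() a safe substitute for copying. */
	static bool ttm_data_is_nonexistent(struct ttm_tt *ttm, void *old_iomap)
	{
		return old_iomap == NULL &&
		       (ttm == NULL || (ttm->state == tt_unpopulated &&
					!(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)));
	}
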
@@ -377,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev,
 
 	if (!current_set_polling_and_test()) {
 
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
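The clflush added above is an erratum workaround: on CPUs flagged with X86_FEATURE_CLFLUSH_MONITOR (certain Core 2 / Xeon 7400-class parts), MONITOR can miss a store to the armed line unless the line is flushed first. A sketch of the resulting idle entry sequence; the __mwait() call and its eax/ecx hints come from the surrounding intel_idle() code, not from this hunk:

	if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
		clflush((void *)&current_thread_info()->flags);	/* erratum workaround */

	__monitor((void *)&current_thread_info()->flags, 0, 0);	/* arm monitor */
	smp_mb();
	if (!need_resched())
		__mwait(eax, ecx);	/* enter the selected C-state */
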
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 			.address = 1,
 			.scan_index = 1,
-			.scan_type = IIO_ST('u', 12, 16, 0),
+			.scan_type = {
+				.sign = 'u',
+				.realbits = 12,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			},
 		},
 		.channel[1] = {
 			.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 			.address = 0,
 			.scan_index = 0,
-			.scan_type = IIO_ST('u', 12, 16, 0),
+			.scan_type = {
+				.sign = 'u',
+				.realbits = 12,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			},
 		},
 		.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
 		.int_vref_mv = 2500,
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = ADIS16448_BARO_OUT,
 		.scan_index = ADIS16400_SCAN_BARO,
-		.scan_type = IIO_ST('s', 16, 16, 0),
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
 	},
 	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
 	IIO_CHAN_SOFT_TIMESTAMP(11)
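Taken together, the three scan_type hunks above replace the IIO_ST() helper macro with spelled-out initializers. The macro took only (sign, realbits, storagebits, shift), so endianness stayed at the CPU default; the expanded form differs in exactly that one field, which is now explicitly IIO_BE. A minimal sketch of the two forms, assuming the 3.13-era struct iio_chan_spec scan_type layout:

	/* Old: IIO_ST() cannot express endianness; it is left at IIO_CPU. */
	.scan_type = IIO_ST('u', 12, 16, 0),

	/* New: identical sign/realbits/storagebits/shift, but the
	 * big-endian wire format of these devices is now stated. */
	.scan_type = {
		.sign = 'u',
		.realbits = 12,
		.storagebits = 16,
		.shift = 0,
		.endianness = IIO_BE,
	},
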
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
 		return -EINVAL;
 	}
 
-	return IIO_VAL_INT_PLUS_MICRO;
+	return IIO_VAL_INT;
 }
 
 static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
+	int cb_destroy;
+
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-	if (iwcm_deref_id(cm_id_priv) &&
-	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+	/*
+	 * Test bit before deref in case the cm_id gets freed on another
+	 * thread.
+	 */
+	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+	if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
 		free_cm_id(cm_id_priv);
 	}
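The reordering in rem_ref() above closes a use-after-free window. When IWCM_F_CALLBACK_DESTROY is set, the thread running the destroy callback may free the cm_id as soon as the reference count drops, so the flags word has to be sampled while this thread still holds its reference. A condensed sketch of the before/after, not the kernel's full code:

	/* Before: test_bit() could read freed memory once
	 * iwcm_deref_id() had dropped the last reference. */
	if (iwcm_deref_id(cm_id_priv) &&
	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags))
		free_cm_id(cm_id_priv);

	/* After: snapshot the flag first, then drop the reference. */
	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
	if (iwcm_deref_id(cm_id_priv) && cb_destroy)
		free_cm_id(cm_id_priv);
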
@@ -49,12 +49,20 @@
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
 	do { \
-		(udata)->inbuf = (void __user *) (ibuf); \
+		(udata)->inbuf = (const void __user *) (ibuf); \
 		(udata)->outbuf = (void __user *) (obuf); \
 		(udata)->inlen = (ilen); \
 		(udata)->outlen = (olen); \
 	} while (0)
 
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
+	do { \
+		(udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
+		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
+		(udata)->inlen = (ilen); \
+		(udata)->outlen = (olen); \
+	} while (0)
+
 /*
  * Our lifetime rules for these structs are the following:
 *
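A short note on the INIT_UDATA_BUF_OR_NULL() addition: unlike INIT_UDATA(), it turns a zero-length direction into a NULL pointer instead of a casted raw offset, which lets the extended-command path pass the header fields through unconditionally. A sketch of the intended call pattern (this mirrors the ib_uverbs_write() hunk further down):

	/* Zero in/out lengths yield NULL buffers rather than
	 * (void __user *) casts of whatever was in the header. */
	INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
			       hdr.in_words * 8, hdr.out_words * 8);
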
@@ -2593,6 +2593,9 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
+	if (kern_spec->reserved)
+		return -EINVAL;
+
 	ib_spec->type = kern_spec->type;
 
 	switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	void *ib_spec;
 	int i;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	if (ucore->outlen < sizeof(resp))
 		return -ENOSPC;
 
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
 		return -EINVAL;
 
+	if (cmd.flow_attr.reserved[0] ||
+	    cmd.flow_attr.reserved[1])
+		return -EINVAL;
+
 	if (cmd.flow_attr.num_of_specs) {
 		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
 					 GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
 			pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
 				i, cmd.flow_attr.size);
+			err = -EINVAL;
 			goto err_free;
 		}
 		flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
 	struct ib_uobject *uobj;
 	int ret;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
 	if (ret)
 		return ret;
 
+	if (cmd.comp_mask)
+		return -EINVAL;
+
 	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
 			      file->ucontext);
 	if (!uobj)
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
 			return -EINVAL;
 
+		if (ex_hdr.cmd_hdr_reserved)
+			return -EINVAL;
+
 		if (ex_hdr.response) {
 			if (!hdr.out_words && !ex_hdr.provider_out_words)
 				return -EINVAL;
 
+			if (!access_ok(VERIFY_WRITE,
+				       (void __user *) (unsigned long) ex_hdr.response,
+				       (hdr.out_words + ex_hdr.provider_out_words) * 8))
+				return -EFAULT;
 		} else {
 			if (hdr.out_words || ex_hdr.provider_out_words)
 				return -EINVAL;
 		}
 
-		INIT_UDATA(&ucore,
-			   (hdr.in_words) ? buf : 0,
-			   (unsigned long)ex_hdr.response,
-			   hdr.in_words * 8,
-			   hdr.out_words * 8);
+		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+				       hdr.in_words * 8, hdr.out_words * 8);
 
-		INIT_UDATA(&uhw,
-			   (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
-			   (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
-			   ex_hdr.provider_in_words * 8,
-			   ex_hdr.provider_out_words * 8);
+		INIT_UDATA_BUF_OR_NULL(&uhw,
+				       buf + ucore.inlen,
+				       (unsigned long) ex_hdr.response + ucore.outlen,
+				       ex_hdr.provider_in_words * 8,
+				       ex_hdr.provider_out_words * 8);
 
 		err = uverbs_ex_cmd_table[command](file,
 						   &ucore,
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
-				  struct l2t_entry *l2t)
-{
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (dev->rdev.lldi.filt_mode) {
-
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-	return ntuple;
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
 			req->local_ip = la->sin_addr.s_addr;
 			req->peer_ip = ra->sin_addr.s_addr;
 			req->opt0 = cpu_to_be64(opt0);
-			req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-						ep->dst, ep->l2t));
+			req->params = cpu_to_be32(cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t));
 			req->opt2 = cpu_to_be32(opt2);
 		} else {
 			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
 			req6->peer_ip_lo = *((__be64 *)
 					     (ra6->sin6_addr.s6_addr + 8));
 			req6->opt0 = cpu_to_be64(opt0);
-			req6->params = cpu_to_be32(
-					select_ntuple(ep->com.dev, ep->dst,
-						      ep->l2t));
+			req6->params = cpu_to_be32(cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t));
 			req6->opt2 = cpu_to_be32(opt2);
 		}
 	} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
 			t5_req->peer_ip = ra->sin_addr.s_addr;
 			t5_req->opt0 = cpu_to_be64(opt0);
 			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-						     select_ntuple(ep->com.dev,
-						     ep->dst, ep->l2t)));
+						     cxgb4_select_ntuple(
+						     ep->com.dev->rdev.lldi.ports[0],
+						     ep->l2t)));
 			t5_req->opt2 = cpu_to_be32(opt2);
 		} else {
 			t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
 					    (ra6->sin6_addr.s6_addr + 8));
 			t5_req6->opt0 = cpu_to_be64(opt0);
 			t5_req6->params = (__force __be64)cpu_to_be32(
-					select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+					cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t));
 			t5_req6->opt2 = cpu_to_be32(opt2);
 		}
 	}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	memset(req, 0, sizeof(*req));
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+				     ep->com.dev->rdev.lldi.ports[0],
 				     ep->l2t));
 	sin = (struct sockaddr_in *)&ep->com.local_addr;
 	req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	if (dev->rdev.lldi.enable_fw_ofld_conn)
+	if (dev->rdev.lldi.enable_fw_ofld_conn &&
+	    ep->com.local_addr.ss_family == AF_INET)
 		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
 					     cm_id->local_addr.ss_family, ep);
 	else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
 	 */
-	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
-	       - dev->rdev.lldi.tids->sftid_base
-	       + dev->rdev.lldi.tids->nstids;
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
 	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
 	if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	window = (__force u16) htons((__force u16)tcph->window);
 
 	/* Calcuate filter portion for LE region. */
-	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+						    dev->rdev.lldi.ports[0],
+						    e));
 
 	/*
 	 * Synthesize the cpl_pass_accept_req. We have everything except the
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 {
 	u32 remain = len;
 	u32 dmalen;
@@ -31,6 +31,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/if_arp.h>	/* For ARPHRD_xxx */
 #include <linux/module.h>
 #include <net/rtnetlink.h>
 #include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 		return -EINVAL;
 
 	pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-	if (!pdev)
+	if (!pdev || pdev->type != ARPHRD_INFINIBAND)
 		return -ENODEV;
 
 	ppriv = netdev_priv(pdev);
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
 
+		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
 		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_rx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->dev_rx_cq[i]))
+		if (IS_ERR(device->dev_rx_cq[i])) {
+			ret = PTR_ERR(device->dev_rx_cq[i]);
+			device->dev_rx_cq[i] = NULL;
 			goto out_cq;
+		}
 
+		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
 		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_tx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_TX_CQ_LEN, i);
-		if (IS_ERR(device->dev_tx_cq[i]))
+		if (IS_ERR(device->dev_tx_cq[i])) {
+			ret = PTR_ERR(device->dev_tx_cq[i]);
+			device->dev_tx_cq[i] = NULL;
 			goto out_cq;
+		}
 
-		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
-			goto out_cq;
+		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
+			goto out_cq;
 
-		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
-			goto out_cq;
+		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
+			goto out_cq;
 	}
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
 	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
 
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
 	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }
 
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
 static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 					 int irq, int do_mask)
 {
-	int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
-	int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+	/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+	int bitfield_width = 4;
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
 				      shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 
 static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
 {
+	/* The SENSE register is assumed to be 32-bit. */
 	int bitfield_width = p->config.sense_bitfield_width;
-	int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
 
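The two intc_irqpin hunks above fix the same shift calculation. For a 32-bit register packed with w-bit fields, field i starts at bit 32 - (i + 1) * w; the old (7 - irq) * w form only agrees with that when w == 4. A worked check:

	/* w = 4 (PRIO):  (7 - 0) * 4 = 28  and  32 - (0 + 1) * 4 = 28  -> same
	 * w = 2 (SENSE): (7 - 0) * 2 = 14  but  32 - (0 + 1) * 2 = 30  -> old
	 * formula lands mid-register and programs the wrong field. */
	int shift = 32 - (irq + 1) * bitfield_width;

So the PRIO change is cosmetic, while the SENSE change is a real fix whenever sense_bitfield_width is not 4.
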
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
 	int i;
 	struct pci_dev *tmp_hfcpci = NULL;
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
 	strcpy(tmp, hfcpci_revision);
 	printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
 
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
 	struct IsdnCardState *cs = card->cs;
 	char tmp[64];
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
 	strcpy(tmp, telespci_revision);
 	printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
 	if (cs->typ != ISDN_CTYPE_TELESPCI)
@@ -421,9 +421,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}
 
Some files were not shown because too many files have changed in this diff.