Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-16 09:26:47 +07:00)

commit a600ffe6ec
Merge branch 'for-4.20/google' into for-linus

Whisker device-specific fixes to the hid-google driver.
@@ -75,3 +75,12 @@ Contact:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
 		Amount (in KiB) of low (or normal) memory in the
 		balloon.
+
+What:		/sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:		September 2018
+KernelVersion:	4.20
+Contact:	xen-devel@lists.xenproject.org
+Description:
+		Control scrubbing pages before returning them to Xen for others domains
+		use. Can be set with xen_scrub_pages cmdline
+		parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
@@ -5000,6 +5000,12 @@
 			Disables the PV optimizations forcing the HVM guest to
 			run as generic HVM guest with no PV drivers.
 
+	xen_scrub_pages=	[XEN]
+			Boolean option to control scrubbing pages before giving them back
+			to Xen, for use by other domains. Can be also changed at runtime
+			with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+			Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
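As a quick illustration of the knob documented above, here is a minimal userspace sketch (assumptions: the sysfs path from the hunk exists on a 4.20+ Xen guest kernel and the process has write permission; error handling is trimmed) that enables scrubbing at runtime:

#include <stdio.h>

int main(void)
{
	/* Path taken from the ABI entry above. */
	const char *path = "/sys/devices/system/xen_memory/xen_memory0/scrub_pages";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* 1 = scrub pages before returning them to Xen */
	fclose(f);
	return 0;
}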
@@ -348,3 +348,7 @@ Version History
 1.13.1  Fix deadlock caused by early md_stop_writes().  Also fix size an
 	state races.
 1.13.2  Fix raid redundancy validation and avoid keeping raid set frozen
+1.14.0  Fix reshape race on small devices.  Fix stripe adding reshape
+	deadlock/potential data corruption.  Update superblock when
+	specific devices are requested via rebuild.  Fix RAID leg
+	rebuild errors.
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
 
 Required properties:
 	- compatible = "gpio-keys";
@@ -10,6 +10,7 @@ Required properties:
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
 Michal Januszewski <spock@gentoo.org>
-Last updated: 2009-03-30
+Last updated: 2017-10-10
 
 Documentation of the uvesafb options is loosely based on vesafb.txt.
@@ -848,7 +848,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-4.1, the following members are defined:
+4.18, the following members are defined:
 
 struct file_operations {
 	struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
 	ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
 	ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
 	int (*iterate) (struct file *, struct dir_context *);
+	int (*iterate_shared) (struct file *, struct dir_context *);
 	__poll_t (*poll) (struct file *, struct poll_table_struct *);
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
 #ifndef CONFIG_MMU
 	unsigned (*mmap_capabilities)(struct file *);
 #endif
+	ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+	int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+	int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+	int (*fadvise)(struct file *, loff_t, loff_t, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
 
   iterate: called when the VFS needs to read the directory contents
 
+  iterate_shared: called when the VFS needs to read the directory contents
+	when filesystem supports concurrent dir iterators
+
   poll: called by the VFS when a process wants to check if there is
 	activity on this file and (optionally) go to sleep until there
 	is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
 
   fallocate: called by the VFS to preallocate blocks or punch a hole.
 
+  copy_file_range: called by the copy_file_range(2) system call.
+
+  clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
+	FICLONE commands.
+
+  dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
+	command.
+
+  fadvise: possibly called by the fadvise64() system call.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
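To make the newly documented members concrete, here is a minimal sketch of a driver filling in the updated struct, including the fadvise hook (illustrative only: the demo_* names are hypothetical, and a real driver would implement more methods):

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical fadvise implementation: just acknowledge the hint. */
static int demo_fadvise(struct file *file, loff_t offset, loff_t len,
			int advice)
{
	pr_debug("demo: advice %d for range [%lld, %lld)\n",
		 advice, offset, offset + len);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner	 = THIS_MODULE,
	.fadvise = demo_fadvise,	/* newly documented member, see above */
};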
@@ -33,4 +33,3 @@ Video Function Calls
     video-clear-buffer
     video-set-streamtype
     video-set-format
-    video-set-attributes
Documentation/process/code-of-conduct.rst (new file, 81 lines)
@@ -0,0 +1,81 @@
+Contributor Covenant Code of Conduct
+++++++++++++++++++++++++++++++++++++
+
+Our Pledge
+==========
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and
+expression, level of experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and orientation.
+
+Our Standards
+=============
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others’ private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+
+Our Responsibilities
+====================
+
+Maintainers are responsible for clarifying the standards of acceptable behavior
+and are expected to take appropriate and fair corrective action in response to
+any instances of unacceptable behavior.
+
+Maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+Scope
+=====
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+Enforcement
+===========
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the Technical Advisory Board (TAB) at
+<tab@lists.linux-foundation.org>. All complaints will be reviewed and
+investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. The TAB is obligated to maintain
+confidentiality with regard to the reporter of an incident. Further details of
+specific enforcement policies may be posted separately.
+
+Maintainers who do not follow or enforce the Code of Conduct in good faith may
+face temporary or permanent repercussions as determined by other members of the
+project’s leadership.
+
+Attribution
+===========
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
@@ -1,28 +0,0 @@
-Code of Conflict
-----------------
-
-The Linux kernel development effort is a very personal process compared
-to "traditional" ways of developing software.  Your code and ideas
-behind it will be carefully reviewed, often resulting in critique and
-criticism.  The review will almost always require improvements to the
-code before it can be included in the kernel.  Know that this happens
-because everyone involved wants to see the best possible solution for
-the overall success of Linux.  This development process has been proven
-to create the most robust operating system kernel ever, and we do not
-want to do anything to cause the quality of submission and eventual
-result to ever decrease.
-
-If however, anyone feels personally abused, threatened, or otherwise
-uncomfortable due to this process, that is not acceptable.  If so,
-please contact the Linux Foundation's Technical Advisory Board at
-<tab@lists.linux-foundation.org>, or the individual members, and they
-will work to resolve the issue to the best of their ability.  For more
-information on who is on the Technical Advisory Board and what their
-role is, please see:
-
-	- http://www.linuxfoundation.org/projects/linux/tab
-
-As a reviewer of code, please strive to keep things civil and focused on
-the technical issues involved.  We are all humans, and frustrations can
-be high on both sides of the process.  Try to keep in mind the immortal
-words of Bill and Ted, "Be excellent to each other."
@@ -20,7 +20,7 @@ Below are the essential guides that every developer should read.
    :maxdepth: 1
 
    howto
-   code-of-conflict
+   code-of-conduct
    development-process
    submitting-patches
    coding-style
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
 Architectures: s390
 Parameters: none
 Returns: 0 on success, -EINVAL if hpage module parameter was not set
-	 or cmma is enabled
+	 or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+	 flag set
 
 With this capability the KVM support for memory backing with 1m pages
 through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned.
 While it is generally possible to create a huge page backed VM without
 this capability, the VM will not be able to run.
 
+7.14 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access. Currently, this
+capability does not enable write permissions of this MSR for the guest.
+
 8. Other capabilities.
 ----------------------
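For reference, enabling the new capability from a VMM follows the usual KVM_ENABLE_CAP pattern; a minimal sketch (vm_fd is assumed to come from KVM_CREATE_VM, and error handling is omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_msr_platform_info(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
	cap.args[0] = 1;	/* args[0]: whether the feature should be enabled */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}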
@@ -35,25 +35,25 @@ and two USB cables, connected like this:
 ( If your system does not list a debug port capability then you probably
   won't be able to use the USB debug key. )
 
- b.) You also need a Netchip USB debug cable/key:
+ b.) You also need a NetChip USB debug cable/key:
 
        http://www.plxtech.com/products/NET2000/NET20DC/default.asp
 
-     This is a small blue plastic connector with two USB connections,
+     This is a small blue plastic connector with two USB connections;
      it draws power from its USB connections.
 
  c.) You need a second client/console system with a high speed USB 2.0
      port.
 
- d.) The Netchip device must be plugged directly into the physical
+ d.) The NetChip device must be plugged directly into the physical
      debug port on the "host/target" system. You cannot use a USB hub in
      between the physical debug port and the "host/target" system.
 
      The EHCI debug controller is bound to a specific physical USB
-     port and the Netchip device will only work as an early printk
+     port and the NetChip device will only work as an early printk
      device in this port. The EHCI host controllers are electrically
      wired such that the EHCI debug controller is hooked up to the
-     first physical and there is no way to change this via software.
+     first physical port and there is no way to change this via software.
      You can find the physical port through experimentation by trying
      each physical port on the system and rebooting. Or you can try
      and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
      to the hardware vendor, because there is no reason not to wire
      this port into one of the physically accessible ports.
 
- e.) It is also important to note, that many versions of the Netchip
+ e.) It is also important to note, that many versions of the NetChip
      device require the "client/console" system to be plugged into the
-     right and side of the device (with the product logo facing up and
+     right hand side of the device (with the product logo facing up and
      readable left to right). The reason being is that the 5 volt
      power supply is taken from only one side of the device and it
      must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
     CONFIG_EARLY_PRINTK_DBGP=y
 
     And you need to add the boot command line: "earlyprintk=dbgp".
+
     (If you are using Grub, append it to the 'kernel' line in
-     /etc/grub.conf)
+     /etc/grub.conf. If you are using Grub2 on a BIOS firmware system,
+     append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
+     using Grub2 on an EFI firmware system, append it to the 'linux'
+     or 'linuxefi' line in /boot/grub2/grub.cfg or
+     /boot/efi/EFI/<distro>/grub.cfg.)
 
     On systems with more than one EHCI debug controller you must
     specify the correct EHCI debug controller number. The ordering
     comes from the PCI bus enumeration of the EHCI controllers. The
-    default with no number argument is "0" the first EHCI debug
+    default with no number argument is "0" or the first EHCI debug
    controller. To use the second EHCI debug controller, you would
    use the command line: "earlyprintk=dbgp1"
 
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
     see the raw output.
 
  c.) On Nvidia Southbridge based systems: the kernel will try to probe
-     and find out which port has debug device connected.
+     and find out which port has a debug device connected.
 
 3. Testing that it works fine:
MAINTAINERS (66 lines changed)
@@ -1251,7 +1251,7 @@ N:	meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:	Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:	Antoine Tenart <antoine.tenart@free-electrons.com>
+M:	Antoine Tenart <antoine.tenart@bootlin.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-alpine/
@@ -5625,6 +5625,8 @@ F:	lib/fault-inject.c
 
 FBTFT Framebuffer drivers
 M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:	dri-devel@lists.freedesktop.org
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/fbtft/
 
@@ -6060,7 +6062,7 @@ F:	Documentation/gcc-plugins.txt
 
 GASKET DRIVER FRAMEWORK
 M:	Rob Springer <rspringer@google.com>
-M:	John Joseph <jnjoseph@google.com>
+M:	Todd Poynor <toddpoynor@google.com>
 M:	Ben Chan <benchan@chromium.org>
 S:	Maintained
 F:	drivers/staging/gasket/
@@ -7016,6 +7018,20 @@ F:	drivers/crypto/vmx/aes*
 F:	drivers/crypto/vmx/ghash*
 F:	drivers/crypto/vmx/ppc-xlate.pl
 
+IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
+M:	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:	linux-pci@vger.kernel.org
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Supported
+F:	drivers/pci/hotplug/rpaphp*
+
+IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
+M:	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:	linux-pci@vger.kernel.org
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Supported
+F:	drivers/pci/hotplug/rpadlpar*
+
 IBM ServeRAID RAID DRIVER
 S:	Orphan
 F:	drivers/scsi/ips.*
@@ -8300,7 +8316,7 @@ F:	include/linux/libata.h
 F:	Documentation/devicetree/bindings/ata/
 
 LIBLOCKDEP
-M:	Sasha Levin <alexander.levin@verizon.com>
+M:	Sasha Levin <alexander.levin@microsoft.com>
 S:	Maintained
 F:	tools/lib/lockdep/
 
@@ -9700,13 +9716,6 @@ Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 S:	Maintained
 F:	drivers/media/dvb-frontends/mn88473*
 
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:	linux-pci@vger.kernel.org
-S:	Supported
-F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:	drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:	Jessica Yu <jeyu@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10933,7 +10942,7 @@ M:	Willy Tarreau <willy@haproxy.com>
 M:	Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:	Odd Fixes
 F:	Documentation/auxdisplay/lcd-panel-cgram.txt
-F:	drivers/misc/panel.c
+F:	drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11121,6 +11130,13 @@ F:	include/uapi/linux/switchtec_ioctl.h
 F:	include/linux/switchtec.h
 F:	drivers/ntb/hw/mscc/
 
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:	linux-pci@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:	drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:	Jason Cooper <jason@lakedaemon.net>
@@ -11154,7 +11170,7 @@ F:	drivers/pci/controller/dwc/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSYS DESIGNWARE
 M:	Jingoo Han <jingoohan1@gmail.com>
-M:	Joao Pinto <Joao.Pinto@synopsys.com>
+M:	Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11187,8 +11203,14 @@ F:	tools/pci/
 
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:	Russell Currey <ruscur@russell.cc>
+M:	Sam Bobroff <sbobroff@linux.ibm.com>
+M:	Oliver O'Halloran <oohall@gmail.com>
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
 F:	Documentation/PCI/pci-error-recovery.txt
+F:	drivers/pci/pcie/aer.c
+F:	drivers/pci/pcie/dpc.c
+F:	drivers/pci/pcie/err.c
 F:	Documentation/powerpc/eeh-pci-error-recovery.txt
 F:	arch/powerpc/kernel/eeh*.c
 F:	arch/powerpc/platforms/*/eeh*.c
@@ -11346,10 +11368,10 @@ S:	Maintained
 F:	drivers/platform/x86/peaq-wmi.c
 
 PER-CPU MEMORY ALLOCATOR
+M:	Dennis Zhou <dennis@kernel.org>
 M:	Tejun Heo <tj@kernel.org>
 M:	Christoph Lameter <cl@linux.com>
-M:	Dennis Zhou <dennisszhou@gmail.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 S:	Maintained
 F:	include/linux/percpu*.h
 F:	mm/percpu*.c
@@ -12244,6 +12266,7 @@ F:	Documentation/networking/rds.txt
 
 RDT - RESOURCE ALLOCATION
 M:	Fenghua Yu <fenghua.yu@intel.com>
+M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	arch/x86/kernel/cpu/intel_rdt*
@@ -13433,9 +13456,8 @@ F:	drivers/i2c/busses/i2c-synquacer.c
 F:	Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
 
 SOCIONEXT UNIPHIER SOUND DRIVER
-M:	Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:	Maintained
+S:	Orphan
 F:	sound/soc/uniphier/
 
 SOEKRIS NET48XX LED SUPPORT
@@ -15373,7 +15395,7 @@ S:	Maintained
 UVESAFB DRIVER
 M:	Michal Januszewski <spock@gentoo.org>
 L:	linux-fbdev@vger.kernel.org
-W:	http://dev.gentoo.org/~spock/projects/uvesafb/
+W:	https://github.com/mjanusz/v86d
 S:	Maintained
 F:	Documentation/fb/uvesafb.txt
 F:	drivers/video/fbdev/uvesafb.*
@@ -15897,6 +15919,7 @@ F:	net/x25/
 X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Ingo Molnar <mingo@redhat.com>
+M:	Borislav Petkov <bp@alien8.de>
 R:	"H. Peter Anvin" <hpa@zytor.com>
 M:	x86@kernel.org
 L:	linux-kernel@vger.kernel.org
@@ -15925,6 +15948,15 @@ M:	Borislav Petkov <bp@alien8.de>
 S:	Maintained
 F:	arch/x86/kernel/cpu/microcode/*
 
+X86 MM
+M:	Dave Hansen <dave.hansen@linux.intel.com>
+M:	Andy Lutomirski <luto@kernel.org>
+M:	Peter Zijlstra <peterz@infradead.org>
+L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+S:	Maintained
+F:	arch/x86/mm/
+
 X86 PLATFORM DRIVERS
 M:	Darren Hart <dvhart@infradead.org>
 M:	Andy Shevchenko <andy@infradead.org>
Makefile (24 lines changed)
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
 
-# SUBARCH tells the usermode build what the underlying arch is.  That is set
-# first, and if a usermode build is happening, the "ARCH=um" on the command
-# line overrides the setting of ARCH below.  If a native build is happening,
-# then ARCH is assigned, getting whatever value it gets normally, and
-# SUBARCH is subsequently ignored.
-
-SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-				  -e s/sun4u/sparc64/ \
-				  -e s/arm.*/arm/ -e s/sa110/arm/ \
-				  -e s/s390x/s390/ -e s/parisc64/parisc/ \
-				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-				  -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-				  -e s/riscv.*/riscv/)
+include scripts/subarch.include
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
@@ -616,6 +604,11 @@ CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage \
 	$(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV
 
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER
+  CC_FLAGS_FTRACE := -pg
+endif
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -755,9 +748,6 @@ KBUILD_CFLAGS	+= $(call cc-option, -femit-struct-debug-baseonly) \
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
-ifndef CC_FLAGS_FTRACE
-CC_FLAGS_FTRACE := -pg
-endif
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
   # gcc 5 supports generating the mcount tables directly
   ifeq ($(call cc-option-yn,-mrecord-mcount),y)
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
 	model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@ re_we_data {
 			 <PIN_PA30__NWE_NANDWE>,
 			 <PIN_PB2__NRD_NANDOE>;
 		bias-pull-up;
+		atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
 	};
 
 	ale_cle_rdy_cs {
@@ -106,21 +106,23 @@ gic: interrupt-controller@1e100 {
 	global_timer: timer@1e200 {
 		compatible = "arm,cortex-a9-global-timer";
 		reg = <0x1e200 0x20>;
-		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 		clocks = <&axi_clk>;
 	};
 
 	local_timer: local-timer@1e600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0x1e600 0x20>;
-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+					  IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&axi_clk>;
 	};
 
 	twd_watchdog: watchdog@1e620 {
 		compatible = "arm,cortex-a9-twd-wdt";
 		reg = <0x1e620 0x20>;
-		interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+					  IRQ_TYPE_LEVEL_HIGH)>;
 	};
 
 	armpll: armpll {
@@ -158,7 +160,7 @@ timer: timer@80 {
 	serial0: serial@600 {
 		compatible = "brcm,bcm6345-uart";
 		reg = <0x600 0x1b>;
-		interrupts = <GIC_SPI 32 0>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&periph_clk>;
 		clock-names = "periph";
 		status = "disabled";
@@ -167,7 +169,7 @@ serial0: serial@600 {
 	serial1: serial@620 {
 		compatible = "brcm,bcm6345-uart";
 		reg = <0x620 0x1b>;
-		interrupts = <GIC_SPI 33 0>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&periph_clk>;
 		clock-names = "periph";
 		status = "disabled";
@@ -180,7 +182,7 @@ nand: nand@2000 {
 		reg = <0x2000 0x600>, <0xf0 0x10>;
 		reg-names = "nand", "nand-int-base";
 		status = "disabled";
-		interrupts = <GIC_SPI 38 0>;
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "nand";
 	};
@@ -41,7 +41,7 @@ macb1_clk: macb1_clk {
 	};
 
 	macb1: ethernet@f802c000 {
-		compatible = "cdns,at91sam9260-macb", "cdns,macb";
+		compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
 		reg = <0xf802c000 0x100>;
 		interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
 		pinctrl-names = "default";
@@ -1078,8 +1078,8 @@ spi6: spi@5c001000 {
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&rcc SPI6_K>;
 			resets = <&rcc SPI6_R>;
-			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-			       <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+			       <&mdma1 35 0x0 0x40002 0x0 0x0>;
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};
@@ -800,8 +800,7 @@ hdmi_out: port@1 {
 		};
 
 		hdmi_phy: hdmi-phy@1ef0000 {
-			compatible = "allwinner,sun8i-r40-hdmi-phy",
-				     "allwinner,sun50i-a64-hdmi-phy";
+			compatible = "allwinner,sun8i-r40-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
 				 <&ccu 7>, <&ccu 16>;
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm goto("1: nop\n\t"
+	asm_volatile_goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm goto("1: b %l[l_yes]\n\t"
+	asm_volatile_goto("1: b %l[l_yes]\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
 arm64-obj-$(CONFIG_ARM64_RELOC_TEST)	+= arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
+arm64-obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
 arm64-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
arch/arm64/kernel/crash_core.c (new file, 19 lines)
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/memory.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+	VMCOREINFO_NUMBER(VA_BITS);
+	/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+	vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+						kimage_voffset);
+	vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+						PHYS_OFFSET);
+	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 	}
 }
 #endif /* CONFIG_HIBERNATION */
-
-void arch_crash_save_vmcoreinfo(void)
-{
-	VMCOREINFO_NUMBER(VA_BITS);
-	/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
-	vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
-						kimage_voffset);
-	vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
-						PHYS_OFFSET);
-	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 		return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
 		case PSR_AA32_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
 		case PSR_AA32_MODE_FIQ:
 		case PSR_AA32_MODE_IRQ:
 		case PSR_AA32_MODE_SVC:
 		case PSR_AA32_MODE_ABT:
 		case PSR_AA32_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		default:
 			err = -EINVAL;
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
 		/*
 		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty bit for any page in the set, so check
-		 * them all.  All hugetlb entries are already young.
+		 * the dirty or accessed bit for any page in the set,
+		 * so check them all.
 		 */
 		if (pte_dirty(pte))
 			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
 	}
 
 	if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+	int i;
+
+	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+		return 1;
+
+	for (i = 0; i < ncontig; i++) {
+		pte_t orig_pte = huge_ptep_get(ptep + i);
+
+		if (pte_dirty(pte) != pte_dirty(orig_pte))
+			return 1;
+
+		if (pte_young(pte) != pte_young(orig_pte))
+			return 1;
+	}
+
+	return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty)
 {
-	int ncontig, i, changed = 0;
+	int ncontig, i;
 	size_t pgsize = 0;
 	unsigned long pfn = pte_pfn(pte), dpfn;
 	pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
 	dpfn = pgsize >> PAGE_SHIFT;
 
-	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-	if (!pte_same(orig_pte, pte))
-		changed = 1;
+	if (!__cont_access_flags_changed(ptep, pte, ncontig))
+		return 0;
 
-	/* Make sure we don't lose the dirty state */
+	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+
+	/* Make sure we don't lose the dirty or young state */
 	if (pte_dirty(orig_pte))
 		pte = pte_mkdirty(pte);
 
+	if (pte_young(orig_pte))
+		pte = pte_mkyoung(pte);
+
 	hugeprot = pte_pgprot(pte);
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-	return changed;
+	return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
 	int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
 	int r;
 
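The comment above pins down the expected semantics; a portable reference in plain C (a sketch for illustration, not the kernel's Hexagon implementation) behaves as follows:

#include <assert.h>

/* Reference semantics: fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */
static int ref_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	assert(ref_fls(0) == 0);
	assert(ref_fls(1) == 1);
	assert(ref_fls(0x80000000u) == 32);
	return 0;
}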
@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
 			panic("Can't create %s() memory pool!", __func__);
 		else
 			gen_pool_add(coherent_pool,
-				pfn_to_virt(max_low_pfn),
+				(unsigned long)pfn_to_virt(max_low_pfn),
 				hexagon_coherent_pool_size, -1);
 	}
 
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
 	int desc;			/* the current descriptor */
 	struct ltq_dma_desc *desc_base; /* the descriptor base */
 	int phys;			/* physical addr */
+	struct device *dev;
 };
 
 enum {
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 	unsigned long flags;
 
 	ch->desc = 0;
-	ch->desc_base = dma_zalloc_coherent(NULL,
+	ch->desc_base = dma_zalloc_coherent(ch->dev,
 				LTQ_DESC_NUM * LTQ_DESC_SIZE,
 				&ch->phys, GFP_ATOMIC);
 
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
 	if (!ch->desc_base)
 		return;
 	ltq_dma_close(ch);
-	dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+	dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
 			ch->desc_base, ch->phys);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_free);
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 	return hash__vmemmap_remove_mapping(start, page_size);
 }
 #endif
-struct page *realmode_pfn_to_page(unsigned long pfn);
 
 static inline pte_t pmd_pte(pmd_t pmd)
 {
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
 extern int __init tce_iommu_bus_notifier_init(void);
 extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 		unsigned long *hpa, enum dma_data_direction *direction);
-extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
-		unsigned long *hpa, enum dma_data_direction *direction);
 #else
 static inline void iommu_register_group(struct iommu_table_group *table_group,
 					int pci_domain_number,
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
 	mfspr	r10,SPRN_HSRR1
-	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
-	addi	r11,r11,-4		/* HSRR0 is next instruction */
 	bne+	denorm_assist
 #endif
 
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
 	XVCPSGNDP32(32)
 denorm_done:
+	mfspr	r11,SPRN_HSRR0
+	subi	r11,r11,4
 	mtspr	SPRN_HSRR0,r11
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 }
 EXPORT_SYMBOL_GPL(iommu_tce_xchg);
 
-#ifdef CONFIG_PPC_BOOK3S_64
-long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
-		unsigned long *hpa, enum dma_data_direction *direction)
-{
-	long ret;
-
-	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-
-	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
-			(*direction == DMA_BIDIRECTIONAL))) {
-		struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
-
-		if (likely(pg)) {
-			SetPageDirty(pg);
-		} else {
-			tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-			ret = -EFAULT;
-		}
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
-#endif
-
 int iommu_take_ownership(struct iommu_table *tbl)
 {
 	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)
 
-	/* Store the PPR in r11 and reset to decent value */
 	std	r11, GPR11(r1)			/* Temporary stash */
 
+	/*
+	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+	 * clobbered by an exception once we turn on MSR_RI below.
+	 */
+	ld	r11, PACATMSCRATCH(r13)
+	std	r11, GPR1(r1)
+
+	/*
+	 * Store r13 away so we can free up the scratch SPR for the SLB fault
+	 * handler (needed once we start accessing the thread_struct).
+	 */
+	GET_SCRATCH0(r11)
+	std	r11, GPR13(r1)
+
 	/* Reset MSR RI so we can take SLB faults again */
 	li	r11, MSR_RI
 	mtmsrd	r11, 1
 
+	/* Store the PPR in r11 and reset to decent value */
 	mfspr	r11, SPRN_PPR
 	HMT_MEDIUM
 
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
 	SAVE_GPR(8, r7)				/* user r8 */
 	SAVE_GPR(9, r7)				/* user r9 */
 	SAVE_GPR(10, r7)			/* user r10 */
-	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
+	ld	r3, GPR1(r1)			/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
 	ld	r5, GPR11(r1)			/* user r11 */
 	ld	r6, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(8)				/* user r13 */
+	ld	r8, GPR13(r1)			/* user r13 */
 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
 	std	r5, GPR11(r7)
@@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				   unsigned long ea, unsigned long dsisr)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long mmu_seq, pte_size;
-	unsigned long gpa, gfn, hva, pfn;
+	unsigned long mmu_seq;
+	unsigned long gpa, gfn, hva;
 	struct kvm_memory_slot *memslot;
 	struct page *page = NULL;
 	long ret;
@@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	hva = gfn_to_hva_memslot(memslot, gfn);
 	if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
-		pfn = page_to_pfn(page);
 		upgrade_write = true;
 	} else {
+		unsigned long pfn;
+
 		/* Call KVM generic code to do the slow-path check */
 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
 					   writing, upgrade_p);
@@ -639,61 +640,43 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 	}
 
-	/* See if we can insert a 1GB or 2MB large PTE here */
-	level = 0;
-	if (page && PageCompound(page)) {
-		pte_size = PAGE_SIZE << compound_order(compound_head(page));
-		if (pte_size >= PUD_SIZE &&
-		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
-		    (hva & (PUD_SIZE - PAGE_SIZE))) {
-			level = 2;
-			pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
-		} else if (pte_size >= PMD_SIZE &&
-			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
-			   (hva & (PMD_SIZE - PAGE_SIZE))) {
-			level = 1;
-			pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
-		}
-	}
+	/*
+	 * Read the PTE from the process' radix tree and use that
+	 * so we get the shift and attribute bits.
+	 */
+	local_irq_disable();
+	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	pte = *ptep;
+	local_irq_enable();
+
+	/* Get pte level from shift/size */
+	if (shift == PUD_SHIFT &&
+	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
+	    (hva & (PUD_SIZE - PAGE_SIZE))) {
+		level = 2;
+	} else if (shift == PMD_SHIFT &&
+		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+		   (hva & (PMD_SIZE - PAGE_SIZE))) {
+		level = 1;
+	} else {
+		level = 0;
+		if (shift > PAGE_SHIFT) {
+			/*
+			 * If the pte maps more than one page, bring over
+			 * bits from the virtual address to get the real
+			 * address of the specific single page we want.
+			 */
+			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+			pte = __pte(pte_val(pte) | (hva & rpnmask));
+		}
+	}
 
-	/*
-	 * Compute the PTE value that we need to insert.
-	 */
-	if (page) {
-		pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
-			_PAGE_ACCESSED;
-		if (writing || upgrade_write)
-			pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
-		pte = pfn_pte(pfn, __pgprot(pgflags));
-	} else {
-		/*
-		 * Read the PTE from the process' radix tree and use that
-		 * so we get the attribute bits.
-		 */
-		local_irq_disable();
-		ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
-		pte = *ptep;
-		local_irq_enable();
-		if (shift == PUD_SHIFT &&
-		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
-		    (hva & (PUD_SIZE - PAGE_SIZE))) {
-			level = 2;
-		} else if (shift == PMD_SHIFT &&
-			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
-			   (hva & (PMD_SIZE - PAGE_SIZE))) {
-			level = 1;
-		} else if (shift && shift != PAGE_SHIFT) {
-			/* Adjust PFN */
-			unsigned long mask = (1ul << shift) - PAGE_SIZE;
-			pte = __pte(pte_val(pte) | (hva & mask));
-		}
-		pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
-		if (writing || upgrade_write) {
-			if (pte_val(pte) & _PAGE_WRITE)
-				pte = __pte(pte_val(pte) | _PAGE_DIRTY);
-		} else {
-			pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
-		}
+	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+	if (writing || upgrade_write) {
+		if (pte_val(pte) & _PAGE_WRITE)
+			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+	} else {
+		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
 	}
 
 	/* Allocate space in the tree and write the PTE */
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+		unsigned long entry, unsigned long *hpa,
+		enum dma_data_direction *direction)
+{
+	long ret;
+
+	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+				(*direction == DMA_BIDIRECTIONAL))) {
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+		/*
+		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+		 * calling this so we still get here a valid UA.
+		 */
+		if (pua && *pua)
+			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+	}
+
+	return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+		unsigned long entry)
 {
 	unsigned long hpa = 0;
 	enum dma_data_direction dir = DMA_NONE;
 
-	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 	unsigned long hpa = 0;
 	long ret;
 
-	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
 		/*
 		 * real mode xchg can fail if struct page crosses
 		 * a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 
 	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
 	if (ret)
-		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 
 	return ret;
 }
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
 		return H_CLOSED;
 
-	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 	if (ret) {
 		mm_iommu_mapped_dec(mem);
 		/*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			return ret;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			goto unlock_exit;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 
 	kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 			return ret;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 }
 
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
 	addc	r0, r8, r9
 	ld	r10, 0(r4)
 	ld	r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	rotldi	r5, r5, 8
+#endif
 	adde	r0, r0, r10
 	add	r5, r5, r7
 	adde	r0, r0, r11
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
 	int err;
 
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+		return 0;
+	}
+
 	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 		return err;
|
@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* We do not have access to the sparsemem vmemmap, so we fallback to
|
||||
* walking the list of sparsemem blocks which we already maintain for
|
||||
* the sake of crashdump. In the long run, we might want to maintain
|
||||
* a tree if performance of that linear walk becomes a problem.
|
||||
*
|
||||
* realmode_pfn_to_page functions can fail due to:
|
||||
* 1) As real sparsemem blocks do not lay in RAM continously (they
|
||||
* are in virtual address space which is not available in the real mode),
|
||||
* the requested page struct can be split between blocks so get_page/put_page
|
||||
* may fail.
|
||||
* 2) When huge pages are used, the get_page/put_page API will fail
|
||||
* in real mode as the linked addresses in the page struct are virtual
|
||||
* too.
|
||||
*/
|
||||
struct page *realmode_pfn_to_page(unsigned long pfn)
|
||||
{
|
||||
struct vmemmap_backing *vmem_back;
|
||||
struct page *page;
|
||||
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
|
||||
unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
|
||||
|
||||
for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
|
||||
if (pg_va < vmem_back->virt_addr)
|
||||
continue;
|
||||
|
||||
/* After vmemmap_list entry free is possible, need check all */
|
||||
if ((pg_va + sizeof(struct page)) <=
|
||||
(vmem_back->virt_addr + page_size)) {
|
||||
page = (struct page *) (vmem_back->phys + pg_va -
|
||||
vmem_back->virt_addr);
|
||||
return page;
|
||||
}
|
||||
}
|
||||
|
||||
/* Probably that page struct is split between real pages */
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
|
||||
|
||||
#else
|
||||
|
||||
struct page *realmode_pfn_to_page(unsigned long pfn)
|
||||
{
|
||||
struct page *page = pfn_to_page(pfn);
|
||||
return page;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
|
||||
|
||||
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
|
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
 	mark_initmem_nx();
+	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
@@ -18,11 +18,15 @@
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

static DEFINE_MUTEX(mem_list_mutex);

#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		put_page(page);
		mem->hpas[i] = 0;
	}
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
	if (pageshift > mem->pageshift)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);
	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);
	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}

long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
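The two `*hpa` fixes above exist because the low bits of each hpas[] entry are now reused for the DIRTY flag, so they have to be masked off before the host physical address is composed. A minimal userspace sketch of the same masking idea (the names and the 4 KiB page size here are assumptions for illustration, not the kernel API):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_DIRTY 0x1UL            /* flag stored in the low bits */
    #define PAGE_MASK  (~(4096UL - 1))  /* assume 4 KiB pages */

    /* Compose a host physical address from a flag-carrying entry. */
    static uint64_t entry_to_hpa(uint64_t entry, uint64_t ua)
    {
    	return (entry & PAGE_MASK) | (ua & ~PAGE_MASK);
    }

    int main(void)
    {
    	uint64_t entry = 0x123000UL | PAGE_DIRTY;  /* frame + dirty flag */

    	/* Without the mask, the flag bit would leak into the address. */
    	assert(entry_to_hpa(entry, 0x10aabUL) == 0x123aabUL);
    	return 0;
    }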
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
	int new_nid;

	/* Use associativity from first thread for all siblings */
	vphn_get_associativity(cpu, associativity);
	if (vphn_get_associativity(cpu, associativity))
		return cpu_to_node(cpu);

	new_nid = associativity_to_nid(associativity);
	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;

static void reset_topology_timer(void)
{
	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
	if (vphn_enabled)
		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}

#ifdef CONFIG_SMP

@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
	 * Since any pkey can be used for data or execute, we will just treat
	 * all keys as equal and track them as one entity.
	 */
	pkeys_total = be32_to_cpu(vals[0]);
	pkeys_total = vals[0];
	pkeys_devtree_defined = true;
}

@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
	level_shift = entries_shift + 3;
	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

	if ((level_shift - 3) * levels + page_shift >= 60)
	if ((level_shift - 3) * levels + page_shift >= 55)
		return -EINVAL;

	/* Allocate TCE table */

arch/riscv/include/asm/asm-prototypes.h (new file, 7 lines)
@@ -0,0 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_PROTOTYPES_H

#include <linux/ftrace.h>
#include <asm-generic/asm-prototypes.h>

#endif /* _ASM_RISCV_PROTOTYPES_H */
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	extern char __initramfs_start[];
	extern unsigned long __initramfs_size;
	unsigned long size;

	if (__initramfs_size > 0) {
		initrd_start = (unsigned long)(&__initramfs_start);
		initrd_end = initrd_start + __initramfs_size;
	}

	if (initrd_start >= initrd_end) {
		printk(KERN_INFO "initrd not found or empty");
		goto disable;

@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			walk->dst.virt.addr, walk->src.virt.addr, n);
	if (k)
		ret = blkcipher_walk_done(desc, walk, nbytes - k);
	if (n < k) {
	if (k < n) {
		if (__cbc_paes_set_key(ctx) != 0)
			return blkcipher_walk_done(desc, walk, -EIO);
		memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);

@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage)
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
@@ -691,7 +691,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus)
		r = -EBUSY;
	else if (!hpage || kvm->arch.use_cmma)
	else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
		r = -EINVAL;
	else {
		r = 0;

@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (vma && is_vm_hugetlb_page(vma))
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);

@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
	push	%ebx
	push	%ecx
	push	%edx
	push	%edi

	/*
	 * RIP-relative addressing is needed to access the encryption bit
	 * variable. Since we are running in 32-bit mode we need this call/pop
	 * sequence to get the proper relative addressing.
	 */
	call	1f
1:	popl	%edi
	subl	$1b, %edi

	movl	enc_bit(%edi), %eax
	cmpl	$0, %eax
	jge	.Lsev_exit

	/* Check if running under a hypervisor */
	movl	$1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Return the encryption bit location */
	movl	%eax, enc_bit(%edi)
	jmp	.Lsev_exit

.Lno_sev:
	xor	%eax, %eax
	movl	%eax, enc_bit(%edi)

.Lsev_exit:
	pop	%edi
	pop	%edx
	pop	%ecx
	pop	%ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
ENDPROC(set_sev_encryption_mask)

	.data
enc_bit:
	.int	0xffffffff

#ifdef CONFIG_AMD_MEM_ENCRYPT
	.balign	8

@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !boot_cpu_has(X86_FEATURE_AES) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;

@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !boot_cpu_has(X86_FEATURE_AES) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;

@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !boot_cpu_has(X86_FEATURE_AES) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;

@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
static int __init crypto_morus1280_sse2_module_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;

@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
static int __init crypto_morus640_sse2_module_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;

@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/* Knights Landing does have MISPREDICT bit */
	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}

@@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 */
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
{
	struct ipi_arg_ex **arg;
	struct ipi_arg_ex *ipi_arg;
	struct hv_send_ipi_ex **arg;
	struct hv_send_ipi_ex *ipi_arg;
	unsigned long flags;
	int nr_bank = 0;
	int ret = 1;
@@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
		return false;

	local_irq_save(flags);
	arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
	arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);

	ipi_arg = *arg;
	if (unlikely(!ipi_arg))
@@ -135,7 +135,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
	int cur_cpu, vcpu;
	struct ipi_arg_non_ex ipi_arg;
	struct hv_send_ipi ipi_arg;
	int ret = 1;

	trace_hyperv_send_ipi_mask(mask, vector);

@@ -14,6 +14,16 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H

/*
 * Exposed to assembly code for setting up initial page tables. Cannot be
 * calculated in assembly code (fixmap entries are an enum), but is sanity
 * checked in the actual fixmap C code to make sure that the fixmap is
 * covered fully.
 */
#define FIXMAP_PMD_NUM	2
/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
#define FIXMAP_PMD_TOP	507

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>

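The BUILD_BUG_ON added later in __native_set_fixmap relies on this constant: each level-1 page table holds 512 PTEs, so FIXMAP_PMD_NUM page tables cover FIXMAP_PMD_NUM * 512 fixmap slots. A small sketch of the arithmetic (the 512-entries-per-table value assumes x86-64 4 KiB page tables):

    #include <stdio.h>

    #define PTRS_PER_PTE   512   /* PTEs per 4 KiB page table on x86-64 */
    #define FIXMAP_PMD_NUM 2

    int main(void)
    {
    	/* Number of fixmap slots the static page tables can map. */
    	printf("fixmap capacity: %d entries\n",
    	       FIXMAP_PMD_NUM * PTRS_PER_PTE);
    	return 0;
    }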
@@ -726,19 +726,21 @@ struct hv_enlightened_vmcs {
#define HV_STIMER_AUTOENABLE		(1ULL << 3)
#define HV_STIMER_SINT(config)		(__u8)(((config) >> 16) & 0x0F)

struct ipi_arg_non_ex {
	u32 vector;
	u32 reserved;
	u64 cpu_mask;
};

struct hv_vpset {
	u64 format;
	u64 valid_bank_mask;
	u64 bank_contents[];
};

struct ipi_arg_ex {
/* HvCallSendSyntheticClusterIpi hypercall */
struct hv_send_ipi {
	u32 vector;
	u32 reserved;
	u64 cpu_mask;
};

/* HvCallSendSyntheticClusterIpiEx hypercall */
struct hv_send_ipi_ex {
	u32 vector;
	u32 reserved;
	struct hv_vpset vp_set;

@@ -869,6 +869,8 @@ struct kvm_arch {

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
};

struct kvm_vm_stat {
@@ -1022,6 +1024,7 @@ struct kvm_x86_ops {
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -1055,6 +1058,7 @@ struct kvm_x86_ops {
	bool (*umip_emulated)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

@@ -1482,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
void __init mem_encrypt_free_decrypted_mem(void);

bool sme_active(void);
bool sev_active(void);

#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))

#else	/* !CONFIG_AMD_MEM_ENCRYPT */

#define sme_me_mask	0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }

#define __bss_decrypted

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

/*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
#define __sme_pa(x)		(__pa(x) | sme_me_mask)
#define __sme_pa_nodebug(x)	(__pa_nodebug(x) | sme_me_mask)

extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];

#endif	/* __ASSEMBLY__ */

#endif	/* __X86_MEM_ENCRYPT_H__ */

@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
#endif
	*pmdp = pmd;
}

@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
#endif
	return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
#endif
	return __pud(xchg((pudval_t *)xp, 0));
}
#else

@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

@@ -377,6 +377,7 @@ struct kvm_sync_regs {

#define KVM_X86_QUIRK_LINT0_REENABLED	(1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED	(1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	(1 << 2)

#define KVM_STATE_NESTED_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_RUN_PENDING	0x00000002

@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
	return 0;
}

#ifdef CONFIG_PROC_FS
static int proc_apm_show(struct seq_file *m, void *v)
{
	unsigned short bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
		   units);
	return 0;
}
#endif

static int apm(void *unused)
{

@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e)
		e <= QOS_L3_MBM_LOCAL_EVENT_ID);
}

struct rdt_parse_data {
	struct rdtgroup		*rdtgrp;
	char			*buf;
};

/**
 * struct rdt_resource - attributes of an RDT resource
 * @rid:		The index of the resource
@@ -423,16 +428,19 @@ struct rdt_resource {
	struct rdt_cache	cache;
	struct rdt_membw	membw;
	const char		*format_str;
	int (*parse_ctrlval)	(void *data, struct rdt_resource *r,
				 struct rdt_domain *d);
	int (*parse_ctrlval)(struct rdt_parse_data *data,
			     struct rdt_resource *r,
			     struct rdt_domain *d);
	struct list_head	evt_list;
	int			num_rmid;
	unsigned int		mon_scale;
	unsigned long		fflags;
};

int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
	      struct rdt_domain *d);
int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
	     struct rdt_domain *d);

extern struct mutex rdtgroup_mutex;

@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
int update_domains(struct rdt_resource *r, int closid);
int closids_supported(void);
void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);

@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
	return true;
}

int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
	     struct rdt_domain *d)
{
	unsigned long data;
	char *buf = _buf;
	unsigned long bw_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(buf, &data, r))
	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;
	d->new_ctrl = data;
	d->new_ctrl = bw_val;
	d->have_new_ctrl = true;

	return 0;
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
	return true;
}

struct rdt_cbm_parse_data {
	struct rdtgroup		*rdtgrp;
	char			*buf;
};

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
	      struct rdt_domain *d)
{
	struct rdt_cbm_parse_data *data = _data;
	struct rdtgroup *rdtgrp = data->rdtgrp;
	u32 cbm_val;

@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
static int parse_line(char *line, struct rdt_resource *r,
		      struct rdtgroup *rdtgrp)
{
	struct rdt_cbm_parse_data data;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;

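The refactor above replaces a `void *` callback argument with the shared struct rdt_parse_data, so both parse_bw and parse_cbm get compile-time type checking instead of an opaque cast. A reduced sketch of the pattern (types and names here are simplified stand-ins, not the kernel definitions):

    #include <stdio.h>

    struct parse_data {           /* typed context shared by all parsers */
    	const char *buf;
    	int group_id;
    };

    /* Every parser takes the typed struct; no void * casts are needed. */
    typedef int (*parse_fn)(struct parse_data *data);

    static int parse_bw(struct parse_data *data)
    {
    	printf("group %d: bandwidth '%s'\n", data->group_id, data->buf);
    	return 0;
    }

    int main(void)
    {
    	struct parse_data d = { .buf = "50", .group_id = 1 };
    	parse_fn fn = parse_bw;   /* signature checked by the compiler */
    	return fn(&d);
    }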
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...)
 * limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
@@ -111,6 +117,7 @@ static void closid_init(void)

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
@@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < r->num_closid; i++, ctrl++) {
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
@@ -989,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < r->num_closid; i++, ctrl++) {
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = (unsigned long *)ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
@@ -1024,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false))
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

@@ -1085,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			rdt_last_cmd_printf("schemata overlaps\n");
			ret = -EINVAL;
			goto out;
		}
@@ -1155,8 +1172,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	bool sep = false;
	u32 cbm;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
@@ -1174,6 +1191,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
@@ -1181,8 +1199,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				cbm = d->ctrl_val[rdtgrp->closid];
				size = rdtgroup_cbm_to_size(r, d, cbm);
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
@@ -2336,12 +2359,18 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
	u32 *ctrl;

	for_each_alloc_enabled_rdt_resource(r) {
		/*
		 * Only initialize default allocations for CBM cache
		 * resources
		 */
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		list_for_each_entry(d, &r->domains, list) {
			d->have_new_ctrl = false;
			d->new_ctrl = r->cache.shareable_bits;
			used_b = r->cache.shareable_bits;
			ctrl = d->ctrl_val;
			for (i = 0; i < r->num_closid; i++, ctrl++) {
			for (i = 0; i < closids_supported(); i++, ctrl++) {
				if (closid_allocated(i) && i != closid) {
					mode = rdtgroup_mode_by_closid(i);
					if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2373,6 +2402,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
	}

	for_each_alloc_enabled_rdt_resource(r) {
		/*
		 * Only initialize default allocations for CBM cache
		 * resources
		 */
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		ret = update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("failed to initialize allocations\n");

@@ -7,11 +7,17 @@
#include <linux/eisa.h>
#include <linux/io.h>

#include <xen/xen.h>

static __init int eisa_bus_probe(void)
{
	void __iomem *p = ioremap(0x0FFFD9, 4);
	void __iomem *p;

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
	if (xen_pv_domain() && !xen_initial_domain())
		return 0;

	p = ioremap(0x0FFFD9, 4);
	if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
		EISA_bus = 1;
	iounmap(p);
	return 0;

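The probe compares against a 32-bit little-endian signature: the bytes 'E','I','S','A' read back as one readl() value compose to 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24). A userspace sketch of the same comparison (a plain buffer standing in for the memory-mapped I/O):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
    	const char sig[4] = { 'E', 'I', 'S', 'A' };
    	uint32_t v;

    	/* What readl() would return on a little-endian machine. */
    	memcpy(&v, sig, 4);
    	if (v == (uint32_t)('E' + ('I' << 8) + ('S' << 16) + ('A' << 24)))
    		printf("EISA signature found\n");
    	return 0;
    }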
@@ -35,6 +35,7 @@
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>

/*
 * Manage page tables very early on.
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr)
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long vaddr, vaddr_end;
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	pmd[506] += load_delta;
	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		pmd[i] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
@@ -234,6 +237,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (mem_encrypt_active()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;
		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.

@@ -24,6 +24,7 @@
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt)
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr

#undef PMDS

@@ -28,6 +28,7 @@
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))

static struct pvclock_vsyscall_time_info
			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock;
			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static struct pvclock_vsyscall_time_info *hvclock_mem;

static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
	native_machine_shutdown();
}

static void __init kvmclock_init_mem(void)
{
	unsigned long ncpus;
	unsigned int order;
	struct page *p;
	int r;

	if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
		return;

	ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
	order = get_order(ncpus * sizeof(*hvclock_mem));

	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
		return;
	}

	hvclock_mem = page_address(p);

	/*
	 * hvclock is shared between the guest and the hypervisor, must
	 * be mapped decrypted.
	 */
	if (sev_active()) {
		r = set_memory_decrypted((unsigned long) hvclock_mem,
					 1UL << order);
		if (r) {
			__free_pages(p, order);
			hvclock_mem = NULL;
			pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
			return;
		}
	}

	memset(hvclock_mem, 0, PAGE_SIZE << order);
}

static int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)

	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif

	kvmclock_init_mem();

	return 0;
}
early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
	/* Use the static page for the first CPUs, allocate otherwise */
	if (cpu < HVC_BOOT_ARRAY_SIZE)
		p = &hv_clock_boot[cpu];
	else if (hvclock_mem)
		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
	else
		p = kzalloc(sizeof(*p), GFP_KERNEL);
		return -ENOMEM;

	per_cpu(hv_clock_per_cpu, cpu) = p;
	return p ? 0 : -ENOMEM;
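kvmclock_init_mem only allocates for CPUs beyond the static boot array, rounding the per-cpu structures up to a whole power-of-two number of pages. A sketch of that sizing arithmetic (the page size, CPU count, and struct size below are illustrative assumptions, and this get_order is a simplified stand-in for the kernel helper):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Smallest order such that (PAGE_SIZE << order) >= size. */
    static unsigned int get_order(unsigned long size)
    {
    	unsigned int order = 0;

    	while ((PAGE_SIZE << order) < size)
    		order++;
    	return order;
    }

    int main(void)
    {
    	unsigned long ncpus = 128 - 64;  /* possible cpus minus boot array */
    	unsigned long struct_size = 64;  /* assumed per-cpu struct size */

    	printf("order = %u\n", get_order(ncpus * struct_size));
    	return 0;
    }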
@@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf,

	if (len < 5) {
#ifdef CONFIG_RETPOLINE
		WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
		WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
		return len;	/* call too long for patch site */
	}
@@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,

	if (len < 5) {
#ifdef CONFIG_RETPOLINE
		WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
		WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
		return len;	/* call too long for patch site */
	}

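The bug fixed in these two hunks is that WARN_ONCE takes a condition as its first argument; passing the format string there makes the string itself the (always-true) condition and shifts the real arguments. A reduced macro sketch showing why the wrong call still compiles (this is a simplified stand-in, not the kernel macro):

    #include <stdio.h>

    /* Simplified WARN_ONCE: warn (once per call site) when cond is true. */
    #define WARN_ONCE(cond, fmt, ...)					\
    	do {								\
    		static int warned;					\
    		if ((cond) && !warned) {				\
    			warned = 1;					\
    			fprintf(stderr, fmt, ##__VA_ARGS__);		\
    		}							\
    	} while (0)

    int main(void)
    {
    	/* Wrong: the message becomes the condition; "%s" is the format. */
    	WARN_ONCE("Failing to patch\n", "%s\n", "site");
    	/* Right: explicit condition, message used as the format. */
    	WARN_ONCE(1, "Failing to patch %s\n", "site");
    	return 0;
    }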
@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
	/*
	 * Currently CPU0 is only hotpluggable on Intel platforms. Other
	 * vendors can add hotplug support later.
	 * Xen PV guests don't support CPU0 hotplug at all.
	 */
	if (c->x86_vendor != X86_VENDOR_INTEL)
	if (c->x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_has(X86_FEATURE_XENPV))
		cpu0_hotpluggable = 0;

	/*

@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;				\

#else

#define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

@@ -355,6 +373,7 @@ SECTIONS
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

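BSS_DECRYPTED aligns both section boundaries to PMD_SIZE so the decrypted mapping never splits a 2 MiB page. A sketch of the align-up arithmetic that the linker script's ALIGN() performs (the 2 MiB PMD size is the usual x86-64 assumption):

    #include <stdio.h>
    #include <stdint.h>

    #define PMD_SIZE (2UL * 1024 * 1024)   /* 2 MiB on x86-64 */

    /* Round addr up to the next PMD boundary, like ALIGN() in the script. */
    static uint64_t pmd_align(uint64_t addr)
    {
    	return (addr + PMD_SIZE - 1) & ~(PMD_SIZE - 1);
    }

    int main(void)
    {
    	printf("%#llx\n", (unsigned long long)pmd_align(0x345678));
    	return 0;
    }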
@@ -1344,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return kvm_apic_hw_enabled(apic) &&
	    addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
	return addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1358,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
@@ -1917,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC register must be aligned on 128-bits boundary.
	 * 32/64/128 bits registers must be accessed thru 32 bits.

@@ -899,7 +899,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes.  If it does, kvm_commit_zap_page() can see us
	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
@@ -5417,7 +5417,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	kvm_init_mmu(vcpu, true);
	/*
	 * kvm_mmu_setup() is called only on vCPU initialization.
	 * Therefore, no need to reset mmu roots as they are not yet
	 * initialized.
	 */
	kvm_init_mmu(vcpu, false);
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,

@@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void)
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmap */
	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
				  sizeof(unsigned long), GFP_KERNEL);
	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

@@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void)
	int cpu;

	if (svm_sev_enabled())
		kfree(sev_asid_bitmap);
		bitmap_free(sev_asid_bitmap);

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);
@@ -7149,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.check_intercept = svm_check_intercept,
	.handle_external_intr = svm_handle_external_intr,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,

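bitmap_zalloc(nbits, gfp) is the dedicated helper for what the old code spelled by hand as kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), gfp). A userspace sketch of the equivalence (calloc standing in for the kernel allocator; the macros mirror the kernel definitions):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Userspace stand-in for bitmap_zalloc(): nbits zeroed bits. */
    static unsigned long *bitmap_zalloc(unsigned long nbits)
    {
    	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
    }

    int main(void)
    {
    	unsigned long *asid_bitmap = bitmap_zalloc(512);

    	if (!asid_bitmap)
    		return 1;
    	printf("allocated %zu longs\n", (size_t)BITS_TO_LONGS(512));
    	free(asid_bitmap);   /* kernel counterpart: bitmap_free() */
    	return 0;
    }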
@@ -397,6 +397,7 @@ struct loaded_vmcs {
	int cpu;
	bool launched;
	bool nmi_known_unmasked;
	bool hv_timer_armed;
	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
@@ -1019,6 +1020,8 @@ struct vcpu_vmx {
	int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;
@@ -2864,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
	u16 fs_sel, gs_sel;
	int i;

	vmx->req_immediate_exit = false;

	if (vmx->loaded_cpu_state)
		return;

@@ -5393,9 +5398,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
		 * To use VMXON (and later other VMX instructions), a guest
		 * must first be able to turn on cr4.VMXE (see handle_vmon()).
		 * So basically the check on whether to allow nested VMX
		 * is here.
		 * is here.  We operate under the default treatment of SMM,
		 * so VMX cannot be enabled under SMM.
		 */
		if (!nested_vmx_allowed(vcpu))
		if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
			return 1;
	}

@@ -6183,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
	nested_mark_vmcs12_pages_dirty(vcpu);
}

static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	void *vapic_page;
	u32 vppr;
	int rvi;

	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
		!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
		WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
		return false;

	rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;

	vapic_page = kmap(vmx->nested.virtual_apic_page);
	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
	kunmap(vmx->nested.virtual_apic_page);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}

static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     bool nested)
{
@@ -7966,6 +7993,9 @@ static __init int hardware_setup(void)
		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
	}

	if (!cpu_has_vmx_preemption_timer())
		kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;

	if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
		u64 vmx_msr;

@@ -9208,7 +9238,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)

static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
	kvm_lapic_expired_hv_timer(vcpu);
	if (!to_vmx(vcpu)->req_immediate_exit)
		kvm_lapic_expired_hv_timer(vcpu);
	return 1;
}

@@ -10595,24 +10626,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
					msrs[i].host, false);
}

static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
	if (!vmx->loaded_vmcs->hv_timer_armed)
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = true;
}

static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 tscl;
	u32 delta_tsc;

	if (vmx->hv_deadline_tsc == -1)
	if (vmx->req_immediate_exit) {
		vmx_arm_hv_timer(vmx, 0);
		return;
	}

	tscl = rdtsc();
	if (vmx->hv_deadline_tsc > tscl)
		/* sure to be 32 bit only because checked on set_hv_timer */
		delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
			cpu_preemption_timer_multi);
	else
		delta_tsc = 0;
	if (vmx->hv_deadline_tsc != -1) {
		tscl = rdtsc();
		if (vmx->hv_deadline_tsc > tscl)
			/* set_hv_timer ensures the delta fits in 32-bits */
			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
				cpu_preemption_timer_multi);
		else
			delta_tsc = 0;

		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
		vmx_arm_hv_timer(vmx, delta_tsc);
		return;
	}

	if (vmx->loaded_vmcs->hv_timer_armed)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = false;
}

static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10672,7 +10722,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)

	atomic_switch_perf_msrs(vmx);

	vmx_arm_hv_timer(vcpu);
	vmx_update_hv_timer(vcpu);

	/*
	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -11427,16 +11477,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	/* Make sure short timeouts reliably trigger an immediate vmexit.
	 * hrtimer_start does not guarantee this. */
	if (preemption_timeout <= 1) {
	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
@@ -11646,11 +11698,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (!nested_cpu_has_vid(vmcs12) ||
	    !nested_exit_intr_ack_set(vcpu) ||
	    vmcs12->posted_intr_nv & 0xff00))
	    (vmcs12->posted_intr_nv & 0xff00) ||
	    (vmcs12->posted_intr_desc_addr & 0x3f) ||
	    (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
@@ -12076,11 +12132,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,

	exec_control = vmcs12->pin_based_vm_exec_control;

	/* Preemption timer setting is only taken from vmcs01. */
	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	/* Preemption timer setting is computed directly in vmx_vcpu_run.  */
	exec_control |= vmcs_config.pin_based_exec_ctrl;
	if (vmx->hv_deadline_tsc == -1)
		exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	vmx->loaded_vmcs->hv_timer_armed = false;

	/* Posted interrupts setting is only taken from vmcs12. */
	if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -12318,6 +12373,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;

	if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;

	if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;

@@ -12863,6 +12921,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
	return 0;
}

static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	to_vmx(vcpu)->req_immediate_exit = true;
}

static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
@@ -13253,12 +13316,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (vmx->hv_deadline_tsc == -1)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	else
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

@@ -13462,18 +13520,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
		return -ERANGE;

	vmx->hv_deadline_tsc = tscl + delta_tsc;
	vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			PIN_BASED_VMX_PREEMPTION_TIMER);

	return delta_tsc == 0;
}

static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	vmx->hv_deadline_tsc = -1;
	vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
			PIN_BASED_VMX_PREEMPTION_TIMER);
	to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif

@@ -13954,6 +14006,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;
@@ -14097,6 +14157,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.apicv_post_state_restore = vmx_apicv_post_state_restore,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,
	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
	.sync_pir_to_irr = vmx_sync_pir_to_irr,
	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,

@@ -14130,6 +14191,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.umip_emulated = vmx_umip_emulated,

	.check_nested_events = vmx_check_nested_events,
	.request_immediate_exit = vmx_request_immediate_exit,

	.sched_in = vmx_sched_in,

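vmx_update_hv_timer converts the remaining TSC ticks to the preemption-timer unit by shifting right by the CPU's timer multiplier, clamping at zero when the deadline has already passed (a zero value forces an immediate exit). A sketch of that computation (the deadline and multiplier values are illustrative assumptions):

    #include <stdio.h>
    #include <stdint.h>

    /* Remaining preemption-timer ticks for a TSC deadline. */
    static uint32_t timer_value(uint64_t deadline_tsc, uint64_t tscl,
    			    unsigned int multi)
    {
    	if (deadline_tsc > tscl)
    		return (uint32_t)((deadline_tsc - tscl) >> multi);
    	return 0;   /* deadline already passed: exit immediately */
    }

    int main(void)
    {
    	/* Assumed: deadline 1<<20 ticks away, multiplier of 5. */
    	printf("%u\n", timer_value(1048576 + 100, 100, 5));
    	return 0;
    }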
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
	if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		break;
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated ||
		    data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
		     cpuid_fault_enabled(vcpu)))
			return 1;
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		msr_info->data = vcpu->arch.osvw.status;
		break;
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated &&
		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
			return 1;
		msr_info->data = vcpu->arch.msr_platform_info;
		break;
	case MSR_MISC_FEATURES_ENABLES:
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_SPLIT_IRQCHIP:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_GET_MSR_FEATURES:
	case KVM_CAP_MSR_PLATFORM_INFO:
		r = 1;
		break;
	case KVM_CAP_SYNC_REGS:
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
			break;

		BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
		r = -EFAULT;
		if (get_user(user_data_size, &user_kvm_nested_state->size))
			return -EFAULT;
			break;

		r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
						  user_data_size);
		if (r < 0)
			return r;
			break;

		if (r > user_data_size) {
			if (put_user(r, &user_kvm_nested_state->size))
				return -EFAULT;
			return -E2BIG;
				r = -EFAULT;
			else
				r = -E2BIG;
			break;
		}

		r = 0;
		break;
	}
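The reworked KVM_GET_NESTED_STATE path implements a common size-negotiation protocol: userspace passes its buffer size, and if the state is larger the kernel writes the required size back and fails with -E2BIG so the caller can retry. A sketch of the same pattern in plain C (this is the protocol shape, not the ioctl ABI):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Copy up to *size bytes of state; report the full size needed. */
    static int get_state(char *buf, size_t *size)
    {
    	const char state[] = "nested-state-blob";
    	size_t need = sizeof(state);

    	if (*size < need) {
    		*size = need;   /* tell the caller how much to allocate */
    		return -E2BIG;
    	}
    	memcpy(buf, state, need);
    	*size = need;
    	return 0;
    }

    int main(void)
    {
    	char small[4], big[64];
    	size_t sz = sizeof(small);

    	if (get_state(small, &sz) == -E2BIG)  /* retry with reported size */
    		get_state(big, &sz);
    	printf("state is %zu bytes\n", sz);
    	return 0;
    }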
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		if (!kvm_x86_ops->set_nested_state)
			break;

		r = -EFAULT;
		if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
			return -EFAULT;
			break;

		r = -EINVAL;
		if (kvm_state.size < sizeof(kvm_state))
			return -EINVAL;
			break;

		if (kvm_state.flags &
		    ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
			return -EINVAL;
			break;

		/* nested_run_pending implies guest_mode. */
		if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
			return -EINVAL;
			break;

		r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
		break;
@@ -4350,6 +4359,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
		kvm->arch.pause_in_guest = true;
		r = 0;
		break;
	case KVM_CAP_MSR_PLATFORM_INFO:
		kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
@@ -7361,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);

void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	smp_send_reschedule(vcpu->cpu);
}
EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);

/*
 * Returns 1 to let vcpu_run() continue the guest execution loop without
 * exiting to the userspace.  Otherwise, the value will be returned to the
@@ -7565,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

	if (req_immediate_exit) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		smp_send_reschedule(vcpu->cpu);
		kvm_x86_ops->request_immediate_exit(vcpu);
	}

	trace_kvm_entry(vcpu->vcpu_id);
@@ -7829,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
	return 0;
}

/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
	/* PKRU is separately restored in kvm_x86_ops->run.  */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
	preempt_enable();
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
	preempt_enable();
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
@@ -8177,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
		kvm_update_cpuid(vcpu);

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
	if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		mmu_reset_needed = 1;
	}
@@ -8406,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
	vcpu->arch.cr0 |= X86_CR0_ET;
}

/* Swap (qemu) user FPU context for the guest FPU context. */
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
	/* PKRU is separately restored in kvm_x86_ops->run.  */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
	preempt_enable();
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
	preempt_enable();
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8852,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
	pvclock_update_vm_gtod_copy(kvm);

	kvm->arch.guest_can_read_msr_platform_info = true;

	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

@@ -9200,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
	kvm_page_track_flush_slot(kvm, slot);
}

static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	return (is_guest_mode(vcpu) &&
			kvm_x86_ops->guest_apic_has_interrupt &&
			kvm_x86_ops->guest_apic_has_interrupt(vcpu));
}

static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9224,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
		return true;

	if (kvm_arch_interrupt_allowed(vcpu) &&
	    kvm_cpu_has_interrupt(vcpu))
	    (kvm_cpu_has_interrupt(vcpu) ||
	     kvm_guest_apic_has_interrupt(vcpu)))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))

@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
		set_memory_np_noalias(begin_ul, len_pages);
}

void __weak mem_encrypt_free_decrypted_mem(void) { }

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages(&__init_begin, &__init_end);
}

@@ -348,6 +348,30 @@ bool sev_active(void)
 EXPORT_SYMBOL(sev_active);
 
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_free_decrypted_mem(void)
+{
+	unsigned long vaddr, vaddr_end, npages;
+	int r;
+
+	vaddr = (unsigned long)__start_bss_decrypted_unused;
+	vaddr_end = (unsigned long)__end_bss_decrypted;
+	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
+
+	/*
+	 * The unused memory range was mapped decrypted, change the encryption
+	 * attribute from decrypted to encrypted before freeing it.
+	 */
+	if (mem_encrypt_active()) {
+		r = set_memory_encrypted(vaddr, npages);
+		if (r) {
+			pr_warn("failed to free unused decrypted pages\n");
+			return;
+		}
+	}
+
+	free_init_pages("unused decrypted", vaddr, vaddr_end);
+}
+
 void __init mem_encrypt_init(void)
 {
 	if (!sme_me_mask)
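
The override above converts a byte range into a page count before flipping the encryption attribute. A minimal sketch of that address arithmetic, assuming 4 KiB pages and made-up range addresses:

#include <stdio.h>

#define PAGE_SHIFT 12  /* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long vaddr     = 0xffff880000000000UL; /* hypothetical range start */
	unsigned long vaddr_end = 0xffff880000005000UL; /* hypothetical range end */

	/* Page count of [vaddr, vaddr_end); both ends are page-aligned
	 * here, matching the kernel's section bounds. */
	unsigned long npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	printf("npages = %lu\n", npages);  /* 5 */
	return 0;
}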
@@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 {
 	unsigned long address = __fix_to_virt(idx);
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Ensure that the static initial page tables are covering the
+	 * fixmap completely.
+	 */
+	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
 	if (idx >= __end_of_fixed_addresses) {
 		BUG();
 		return;
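
BUILD_BUG_ON above rejects, at compile time, any configuration where the permanent fixmap entries outgrow the statically reserved page-table pages. C11's static_assert expresses the same guarantee in portable code; here is a sketch with invented constants standing in for the kernel macros.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for FIXMAP_PMD_NUM and PTRS_PER_PTE. */
#define FIXMAP_PMD_NUM 2
#define PTRS_PER_PTE   512
#define END_OF_PERMANENT_FIXED_ADDRESSES 600

/* Fails the build, not the boot, if the fixmap outgrows its tables. */
static_assert(END_OF_PERMANENT_FIXED_ADDRESSES <=
	      FIXMAP_PMD_NUM * PTRS_PER_PTE,
	      "static page tables do not cover the fixmap");

int main(void)
{
	printf("fixmap fits in %d PMD pages\n", FIXMAP_PMD_NUM);
	return 0;
}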
@@ -85,10 +85,9 @@ pgd_t * __init efi_call_phys_prolog(void)
 
 void __init efi_call_phys_epilog(pgd_t *save_pgd)
 {
+	load_fixmap_gdt(0);
 	load_cr3(save_pgd);
 	__flush_tlb_all();
-
-	load_fixmap_gdt(0);
 }
 
 void __init efi_runtime_update_mappings(void)
@@ -1907,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* L3_k[511] -> level2_fixmap_pgt */
 	convert_pfn_mfn(level3_kernel_pgt);
 
-	/* L3_k[511][506] -> level1_fixmap_pgt */
+	/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
 	convert_pfn_mfn(level2_fixmap_pgt);
 
 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1952,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-	set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+	for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+		set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+			      PAGE_KERNEL_RO);
+	}
 
 	/* Pin down new L4 */
 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
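
The second hunk above replaces one set_page_prot() call with a loop, because level1_fixmap_pgt is now FIXMAP_PMD_NUM consecutive page-table pages of PTRS_PER_PTE entries each. A sketch of the stride arithmetic, with a hypothetical set_page_prot() that just reports which slice it receives:

#include <stdio.h>

#define FIXMAP_PMD_NUM 2
#define PTRS_PER_PTE   512

typedef unsigned long pte_t;

static pte_t level1_fixmap_pgt[FIXMAP_PMD_NUM * PTRS_PER_PTE];

/* Stand-in for set_page_prot(): reports the page-sized slice handed in. */
static void set_page_prot(pte_t *page)
{
	printf("protecting table page at entry offset %ld\n",
	       (long)(page - level1_fixmap_pgt));
}

int main(void)
{
	int i;

	/* One call per page-table page, matching the new loop. */
	for (i = 0; i < FIXMAP_PMD_NUM; i++)
		set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE);
	return 0;
}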
@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
 
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
 	int err, ret = IRQ_NONE;
-	struct pt_regs regs;
+	struct pt_regs regs = {0};
 	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
 	uint8_t xenpmu_flags = get_xenpmu_flags();
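
The one-character fix above matters because the handler fills in only some fields of the struct before passing it on; without the initializer the rest is stack garbage. A minimal demonstration of what " = {0}" buys:

#include <stdio.h>

struct regs { long ip, sp, flags; };

int main(void)
{
	/* " = {0}" value-initializes every member; a plain declaration
	 * would leave ip/sp/flags indeterminate. */
	struct regs regs = {0};

	regs.ip = 0x1000;  /* the handler fills in only what it knows */
	printf("ip=%#lx sp=%#lx flags=%#lx\n", regs.ip, regs.sp, regs.flags);
	return 0;
}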
@@ -4,6 +4,7 @@ config ZONE_DMA
 
 config XTENSA
 	def_bool y
+	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
@@ -64,11 +64,7 @@ endif
 vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
 plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
 
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
-endif
 
 KBUILD_DEFCONFIG := iss_defconfig
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
+	static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+	static char cmdline[COMMAND_LINE_SIZE] __initdata;
 	int argc = simc_argc();
 	int argv_size = simc_argv_size();
 
 	if (argc > 1) {
-		void **argv = alloc_bootmem(argv_size);
-		char *cmdline = alloc_bootmem(argv_size);
-		int i;
+		if (argv_size > sizeof(argv)) {
+			pr_err("%s: command line too long: argv_size = %d\n",
+			       __func__, argv_size);
+		} else {
+			int i;
 
-		cmdline[0] = 0;
-		simc_argv((void *)argv);
+			cmdline[0] = 0;
+			simc_argv((void *)argv);
 
-		for (i = 1; i < argc; ++i) {
-			if (i > 1)
-				strcat(cmdline, " ");
-			strcat(cmdline, argv[i]);
+			for (i = 1; i < argc; ++i) {
+				if (i > 1)
+					strcat(cmdline, " ");
+				strcat(cmdline, argv[i]);
+			}
+			*p_cmdline = cmdline;
 		}
-		*p_cmdline = cmdline;
 	}
 
 	atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
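
The hunk above stops allocating from bootmem (which is not yet usable this early) and instead uses static buffers guarded by an explicit size check. A userspace sketch of the same guard — all names and the buffer size are invented for the demo:

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 64  /* deliberately small for the demo */

static char cmdline[COMMAND_LINE_SIZE];

/* Concatenate argv into the fixed buffer only if it provably fits. */
static int build_cmdline(int argc, char **argv, int needed)
{
	int i;

	if (needed > (int)sizeof(cmdline)) {
		fprintf(stderr, "command line too long: %d bytes\n", needed);
		return -1;
	}

	cmdline[0] = 0;
	for (i = 1; i < argc; ++i) {
		if (i > 1)
			strcat(cmdline, " ");
		strcat(cmdline, argv[i]);
	}
	return 0;
}

int main(int argc, char **argv)
{
	int i, needed = 1;

	/* Conservative upper bound: each arg plus a separator or NUL. */
	for (i = 1; i < argc; ++i)
		needed += (int)strlen(argv[i]) + 1;

	if (build_cmdline(argc, argv, needed) == 0)
		printf("cmdline: \"%s\"\n", cmdline);
	return 0;
}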
@@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
 	const int sgrp = op_stat_group(req_op);
 	int cpu = part_stat_lock();
 
-	part_stat_add(cpu, part, ticks[sgrp], duration);
+	part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
 	part_round_stats(q, cpu, part);
 	part_dec_in_flight(q, part, op_is_write(req_op));
@@ -1510,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (!blkcg_policy[i])
 			break;
-	if (i >= BLKCG_MAX_POLS)
+	if (i >= BLKCG_MAX_POLS) {
+		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
 		goto err_unlock;
+	}
 
 	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
 	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
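
The blkcg hunk above turns a silent failure into a diagnosed one: when the fixed policy table is full, registration still fails, but now says why. A minimal sketch of the scan-then-warn pattern, with invented names and a tiny table size:

#include <stdio.h>

#define MAX_POLS 3

static void *policy[MAX_POLS];

static int policy_register(void *pol)
{
	int i;

	/* Find the first free slot. */
	for (i = 0; i < MAX_POLS; i++)
		if (!policy[i])
			break;

	/* Full table: fail loudly instead of silently. */
	if (i >= MAX_POLS) {
		fprintf(stderr, "policy_register: MAX_POLS too small\n");
		return -1;
	}

	policy[i] = pol;
	return i;
}

int main(void)
{
	int dummy, j;

	for (j = 0; j < 4; j++)  /* fourth call overflows and warns */
		printf("register -> slot %d\n", policy_register(&dummy));
	return 0;
}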
@@ -2733,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now)
 	 * containing request is enough.
 	 */
 	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
-		unsigned long duration;
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 		int cpu;
 
-		duration = nsecs_to_jiffies(now - req->start_time_ns);
 		cpu = part_stat_lock();
 		part = req->part;
 
 		part_stat_inc(cpu, part, ios[sgrp]);
-		part_stat_add(cpu, part, ticks[sgrp], duration);
+		part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
 		part_round_stats(req->q, cpu, part);
 		part_dec_in_flight(req->q, part, rq_data_dir(req));
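
The hunk above stops round-tripping a nanosecond duration through jiffies, which throws away sub-tick precision, and feeds now - start_time_ns straight into the nsecs counter. A sketch of the precision difference, assuming HZ=250 (4 ms per jiffy) and a truncating conversion like the kernel's:

#include <stdio.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000ULL

/* Truncating conversion, as a jiffies round-trip effectively does. */
static unsigned long nsecs_to_jiffies(unsigned long long ns)
{
	return (unsigned long)(ns / (NSEC_PER_SEC / HZ));
}

int main(void)
{
	unsigned long long duration_ns = 1500000;  /* a 1.5 ms I/O */

	/* Old path: 1.5 ms rounds down to 0 jiffies -- the I/O time
	 * vanishes from the stats. New path keeps all 1500000 ns. */
	printf("as jiffies: %lu\n", nsecs_to_jiffies(duration_ns));
	printf("as nsecs:   %llu\n", duration_ns);
	return 0;
}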
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
 	/*
 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
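
The change above matters because the old code peeked at the counter under RCU and could still race with a concurrent freeze; percpu_ref_tryget() makes check-and-acquire a single atomic step. A minimal stdatomic sketch of that tryget pattern — plain C11 atomics standing in for the kernel's percpu_ref, names invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long usage = 1;  /* 0 means the queue is frozen/gone */

/* Take a reference only if the count is still non-zero; the CAS loop
 * makes check-and-acquire atomic, unlike check-then-take. */
static bool ref_tryget(void)
{
	long v = atomic_load(&usage);

	while (v != 0)
		if (atomic_compare_exchange_weak(&usage, &v, v + 1))
			return true;
	return false;
}

static void ref_put(void) { atomic_fetch_sub(&usage, 1); }

int main(void)
{
	if (!ref_tryget()) {
		puts("queue gone, nothing to iterate");
		return 0;
	}
	puts("iterating busy tags...");
	ref_put();  /* mirrors blk_queue_exit() dropping the reference */
	return 0;
}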
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!rq->q);
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx,
 							     &ctx_list,
 							     from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 * on 'ctx_list'. Do those.
 	 */
 	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
 					     from_schedule);
 	}
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 
 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->type->elevator_name, q->nr_sorted);
@@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 			   part_stat_read(hd, ios[STAT_READ]),
 			   part_stat_read(hd, merges[STAT_READ]),
 			   part_stat_read(hd, sectors[STAT_READ]),
-			   jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+			   (unsigned int)part_stat_read_msecs(hd, STAT_READ),
 			   part_stat_read(hd, ios[STAT_WRITE]),
 			   part_stat_read(hd, merges[STAT_WRITE]),
 			   part_stat_read(hd, sectors[STAT_WRITE]),
-			   jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
+			   (unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
 			   inflight[0],
 			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 			   jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
 			   part_stat_read(hd, ios[STAT_DISCARD]),
 			   part_stat_read(hd, merges[STAT_DISCARD]),
 			   part_stat_read(hd, sectors[STAT_DISCARD]),
-			   jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
+			   (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
 			   );
 		}
 		disk_part_iter_exit(&piter);
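
With the per-op tick stats now kept in nanoseconds, the show paths above convert once at the display edge instead of storing jiffies. A sketch of that conversion, assuming (as the series does elsewhere) that the stored counter already holds nanoseconds — the helper name here is a hypothetical stand-in for part_stat_read_msecs():

#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

/* Stand-in for part_stat_read_msecs(): the stored stat is nanoseconds;
 * /proc/diskstats and sysfs keep reporting milliseconds. */
static unsigned long long stat_nsecs_to_msecs(unsigned long long ns)
{
	return ns / NSEC_PER_MSEC;
}

int main(void)
{
	unsigned long long read_nsecs = 1234567890ULL;  /* sample counter */

	printf("ticks (ms): %u\n",
	       (unsigned int)stat_nsecs_to_msecs(read_nsecs));  /* 1234 */
	return 0;
}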
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev,
 		part_stat_read(p, ios[STAT_READ]),
 		part_stat_read(p, merges[STAT_READ]),
 		(unsigned long long)part_stat_read(p, sectors[STAT_READ]),
-		jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+		(unsigned int)part_stat_read_msecs(p, STAT_READ),
 		part_stat_read(p, ios[STAT_WRITE]),
 		part_stat_read(p, merges[STAT_WRITE]),
 		(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
-		jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
+		(unsigned int)part_stat_read_msecs(p, STAT_WRITE),
 		inflight[0],
 		jiffies_to_msecs(part_stat_read(p, io_ticks)),
 		jiffies_to_msecs(part_stat_read(p, time_in_queue)),
 		part_stat_read(p, ios[STAT_DISCARD]),
 		part_stat_read(p, merges[STAT_DISCARD]),
 		(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
-		jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
+		(unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
 }
 
 ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
Some files were not shown because too many files have changed in this diff.